diff --git a/.gitattributes b/.gitattributes index 122c9a4d8d003253e73fe3075e1b92affa42025b..0f3bff7f3b5bc9127dbecff5d1b074f0e24ee97f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1172,3 +1172,11 @@ data/2025/2504_10xxx/2504.10449/7f439293-0959-4bd1-95b9-6ff52e6c616f_origin.pdf data/2025/2504_10xxx/2504.10458/7fea48cf-977d-4933-8361-31658163081b_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_10xxx/2504.10462/963870cb-6527-42ff-97aa-d1b9f35a156b_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_10xxx/2504.10465/9d3901f2-eace-4793-8815-51f41b459e25_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_content_list.json b/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c8ec80ec318b760666b7da70426fbc75741ef759 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_content_list.json @@ -0,0 +1,2535 @@ +[ + { + "type": "text", + "text": "A Survey of Personalization: From RAG to Agent", + "text_level": 1, + "bbox": [ + 114, + 116, + 550, + 135 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "XIAOPENG LI*, City University of Hong Kong, Hong Kong", + "bbox": [ + 112, + 148, + 493, + 166 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "PENGYUE JIA*, City University of Hong Kong, Hong Kong", + "bbox": [ + 116, + 169, + 496, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "DERONG XU, City University of Hong Kong, Hong Kong and University of Science and Technology of China, China", + "bbox": [ + 116, + 189, + 821, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "YI WEN, City University of Hong Kong, Hong Kong", + "bbox": [ + 116, + 210, + 444, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "YINGYI ZHANG, City University of Hong Kong, Hong Kong and Dalian University of Technology, China", + "bbox": [ + 116, + 231, + 772, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "WENLIN ZHANG, City University of Hong Kong, Hong Kong", + "bbox": [ + 116, + 251, + 514, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "WANYU WANG, City University of Hong Kong, Hong Kong", + "bbox": [ + 116, + 272, + 501, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "YICHAO WANG, Noah's Ark Lab, Huawei, China", + "bbox": [ + 116, + 292, + 437, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ZHAOCHENG DU, Noah's Ark Lab, Huawei, China", + 
"bbox": [ + 116, + 313, + 455, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "XIANGYANG LI, Noah's Ark Lab, Huawei, China", + "bbox": [ + 116, + 333, + 434, + 348 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "YONG LIU, Noah's Ark Lab, Huawei, Singapore", + "bbox": [ + 116, + 354, + 421, + 369 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "HUIFENG GUO, Noah's Ark Lab, Huawei, China", + "bbox": [ + 116, + 375, + 434, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "RUIMING TANG†, Noah's Ark Lab, Huawei, China", + "bbox": [ + 116, + 393, + 450, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "XIANGYU ZHAO†, City University of Hong Kong, Hong Kong", + "bbox": [ + 114, + 415, + 517, + 431 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Personalization has become an essential capability in modern AI systems, enabling customized interactions that align with individual user preferences, contexts, and goals. Recent research has increasingly concentrated on Retrieval-Augmented Generation (RAG) frameworks and their evolution into more advanced agent-based architectures within personalized settings to enhance user satisfaction. Building on this foundation, this survey systematically examines personalization across the three core stages of RAG: pre-retrieval, retrieval, and generation. Beyond RAG, we further extend its capabilities into the realm of Personalized LLM-based Agents, which enhance traditional RAG systems with agentic functionalities, including user understanding, personalized planning and execution, and dynamic generation. For both personalization in RAG and agent-based personalization, we provide formal definitions, conduct a comprehensive review of recent literature, and summarize key datasets and evaluation metrics. Additionally, we discuss fundamental challenges, limitations, and promising research directions in this evolving field. Relevant papers and resources are continuously updated at the Github Repo1.", + "bbox": [ + 112, + 441, + 821, + 597 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts: $\\cdot$ Information systems $\\rightarrow$ Personalization.", + "bbox": [ + 114, + 613, + 447, + 626 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Additional Key Words and Phrases: Large Language Model, Retrieval-Augmented Generation, Agent, Personalization", + "bbox": [ + 112, + 642, + 741, + 656 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ https://github.com/Applied-Machine-Learning-Lab/Awesome-Personalized-RAG-Agent", + "* Equal contribution.", + "† Corresponding authors." + ], + "bbox": [ + 114, + 678, + 535, + 712 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 112, + 736, + 821, + 786 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2018 Copyright held by the owner/author(s). 
Publication rights licensed to ACM.", + "bbox": [ + 114, + 787, + 506, + 799 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Manuscript submitted to ACM", + "bbox": [ + 116, + 801, + 261, + 811 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10147v1 [cs.IR] 14 Apr 2025", + "bbox": [ + 22, + 272, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 464, + 830, + 470, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 174, + 122, + 323, + 133 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Xiaopeng Li*, Pengyue Jia*, Derong Xu, Yi Wen, Yingyi Zhang, Wenlin Zhang, Wanyu Wang, Yichao Wang, Zhaocheng Du, Xiangyang Li, Yong Liu, Huifeng Guo, Ruiming Tang†, and Xiangyu Zhao†. 2018. A Survey of Personalization: From RAG to Agent. In Proceedings of Make sure to enter the correct conference title from your rights confirmation email (Conference acronym 'XX). ACM, New York, NY, USA, 25 pages. https://doi.org/XXXXXXXX.XXXXXXXXXX", + "bbox": [ + 174, + 137, + 883, + 198 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 176, + 215, + 318, + 229 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have revolutionized AI-driven applications by enabling natural language understanding and generation at an unprecedented scale. However, these models often suffer from issues such as outdated responses and hallucinations, which severely hinder the accuracy of information generation. Retrieval-Augmented Generation (RAG) has emerged as a promising framework that integrates retrieved information from external corpora, such as external APIs [13, 36], scientific repositories [86, 124] or domain-specific databases [4, 31], ensuring more knowledge-grounded and up-to-date outputs.", + "bbox": [ + 174, + 238, + 883, + 339 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Its versatility has led to significant applications across various domains, including question answering [115], enterprise search [16] and healthcare [143], etc. Among these applications, one particularly notable area is in agent workflows, where RAG enhances autonomous systems by providing context-aware, dynamically retrieved, and reliable knowledge. This is because each stage of the RAG process closely mirrors key aspects of an agent's workflow, as shown in Figure 1. For instance, the query rewriting phase in RAG, which involves semantic understanding and parsing, aligns with the semantic comprehension stage in agent workflows. Likewise, RAG's retrieval phase, which focuses on extracting the most relevant documents, corresponds to the planning and execution phases of an agent, where decisions are made based on retrieved knowledge. Finally, the generation phase in RAG parallels an agent's execution stage, where actions are performed based on the given task. This structural alignment suggests that the architecture of RAG is fundamentally converging with agent workflows, solidifying its position as a key facilitator of intelligent and autonomous systems.", + "bbox": [ + 174, + 342, + 883, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although the structural alignment between RAG and agent workflows highlights their deepening convergence, a critical next step in enhancing these intelligent systems lies in personalization. 
Personalization is a key driver toward achieving more adaptive and context-aware AI, which is fundamental for the progression toward Artificial General Intelligence (AGI). It plays an essential role in applications such as personalized reasoning [39, 149], adaptive decision-making [72], user-specific content generation [109, 151], and interactive AI systems [73, 92]. However, existing research lacks a comprehensive comparative analysis of personalized RAG and agentic approaches. Current surveys primarily focus on general RAG methodologies [32, 35] or agent-related literature [63, 131, 167], without systematically exploring their implications for personalization. While recent works such as [68, 168] discuss personalization, they predominantly address personalized generation within LLMs or specific downstream tasks, overlooking how personalization can be effectively integrated into RAG and agent workflows.", + "bbox": [ + 174, + 515, + 883, + 685 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by the above issues, this survey aims to provide a comprehensive review of the integration of personalization into RAG and agentic RAG frameworks to enhance user experiences and optimize satisfaction. The key contributions of this work can be summarized as follows:", + "bbox": [ + 174, + 686, + 883, + 737 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We provide an extensive exploration of the existing literature on how personalization is integrated into various stages of RAG (pre-retrieval, retrieval, and generation) and agentic RAG (understanding, planning, execution, and generation).", + "- We summarize the key datasets, benchmarks, and evaluation metrics used in existing research for each subtask to facilitate future studies in the respective domains." + ], + "bbox": [ + 174, + 744, + 879, + 825 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 524, + 830, + 532, + 839 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/eddf6ee96c6208637db90f8b20f4af142d11c804484f15c3dc8e58752b900628.jpg", + "image_caption": [ + "Fig. 1. Correlation between personalization and RAG with agent flow." + ], + "image_footnote": [], + "bbox": [ + 277, + 123, + 679, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We also highlight the limitations of current research and suggest future directions for personalized RAG, emphasizing potential advancements to address existing challenges.", + "bbox": [ + 114, + 517, + 818, + 549 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The outline of this survey is as follows: we introduce what is personalization (Sec. 2) and explain how personalization is adopted into RAG pipeline (Sec. 3). Then, we present a literature review on where to integrate personalization within different stages of RAG and agentic RAG workflows (Sec. 4) and discuss the key datasets and evaluation metrics used in existing research (Sec.5). Lastly, we present a discussion on the limitations of current research and future directions (Sec. 
6).", + "bbox": [ + 114, + 559, + 821, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 WHAT IS PERSONALIZATION", + "text_level": 1, + "bbox": [ + 114, + 662, + 344, + 674 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Personalization in current research refers to the tailoring of model predictions or generated content to align with an individual's preferences. In the context of RAG and agents, personalization involves incorporating user-specific information at various stages of the RAG pipeline or within agents. User personalization can be categorized into the following types:", + "bbox": [ + 114, + 683, + 818, + 750 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Explicit User Profile: Explicitly presented user information, including biographical details, attributes (e.g., age, location, gender, education), and social connections (e.g., social networks).", + "- User Historical Interactions: Behavioral data, including browsing history, clicks, and purchases, which help infer user interests and preferences to improve personalization." + ], + "bbox": [ + 114, + 758, + 818, + 824 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 464, + 830, + 472, + 840 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/3888cac0ece5531f078c8ec3406df643f228458552c866b9ad79273c9d66181b.jpg", + "table_caption": [ + "Table 1. Overview of Personalized RAG and Agent." + ], + "table_footnote": [], + "table_body": "
Field | Sub-field | Subsub-field | Papers
Pre-retrieval | Query Rewriting | Learning to Personalized Query Rewrite | CLE-QR [60], CGF [38], PEARL [80]
Pre-retrieval | Query Rewriting | LLM to Personalized Query Rewrite | Least-to-Most Prompting [173], ERAGent [112], CoPS [174], Agent4Ranking [61], FIG [22], BASES [99]
Pre-retrieval | Query Expansion | Tagging-based query expansion | Gossiple [10], Biancalana and Micarelli [12], SoQuES [15], Zhou et al. [172]
Pre-retrieval | Query Expansion | Else | Lin and Huang [66], Bender et al. [9], Axiomatic PQEC [79], WE-LM [144], PSQE [14], PQEWC [7]
Pre-retrieval | Others | - | Bobo [33], Kannadasan and Aslanyan [52], PSQE [8]
Retrieval | Indexing | - | PEARL [80], KG-Retriever [21], EMG-RAG [137], PGraphRAG [5]
Retrieval | Retrieval | Dense Retrieval | MeMemo [138], RECAP [71], LAPDOG [43], Gu et al. [37], PersonalLM [77], UIA [155], XPERT [125], DPSR [157], RTM [11], Pearl [80], MemPrompt [74], EERRA [23], MALP [160], USER-LLM [84], PER-PCS [120]
Retrieval | Retrieval | Sparse Retrieval | OPPU [121], PAG [101], Au et al. [5], UniMS-RAG [128], Deng et al. [29]
Retrieval | Retrieval | Prompt-based Retrieval | LAPS [50], UniMP [140], Shen et al. [111]
Retrieval | Retrieval | Others | Salemi et al. [103], PersonalTM [65], Zhang et al. [165]
Retrieval | Post-retrieval | - | PersonaRAG [156], Pavliukevich et al. [89], UniMS-RAG [128], Salemi and Zamani [106], Zhang et al. [164], AutoCompressors [24], FIT-RAG [76]
Generation | Generation from Explicit Preferences | Direct Prompting | P² [49], Character Profiling [154], OpinionQA [107], Kang et al. [51], Liu et al. [67], Cue-CoT [129], TICL [26]
Generation | Generation from Explicit Preferences | Profile-Augmented Prompting | GPG [158], Richardson et al. [101], ONCE [70], LLMTreeRec [163], KAR [145], Matryoshka [58]
Generation | Generation from Explicit Preferences | Personalized-Prompt Prompting | Li et al. [57], RecGPT [166], PEPLER-D [59], GRAPA [94], SGPT [28], PFCL [152]
Generation | Generation from Implicit Preferences | Fine-tuning-Based Methods | PLoRA [165], LM-P [142], MiLP [165], OPPU [122], PER-PCS [120], Review-LLM [91], UserIdentifier [78], UserAdapter [171], HYDRA [175], PocketLLM [90], CoGenesis [161]
Generation | Generation from Implicit Preferences | Reinforcement Learning-Based Methods | P-RLHF [62], P-SOUPS [47], PAD [20], REST-PG [104], Salemi et al. [103], RewriterSIRI [57], Kulkarni et al. [54]
From RAG to Agent | Personalized Understanding | In user-profile understanding | Xu et al. [148], Abbasian et al. [2]
From RAG to Agent | Personalized Understanding | In agent's role understanding | RoleLLM [139], Character-LLM [110], Wang et al. [134]
From RAG to Agent | Personalized Understanding | In agent's user-role joint understanding | SocialBench [18], Dai et al. [27], Ran et al. [96], Wang et al. [126], Tu et al. [123], Necko [153]
From RAG to Agent | Personalized Planning and Execution | Memory Management | EMG-RAG [137], Park et al. [87], Abbasian et al. [2], RecAgent [133], TravelPlanner+ [114], PersonalWAB [17], VOYAGER [127], MemoryLLM [136]
From RAG to Agent | Personalized Planning and Execution | Tool and API Calling | VOYAGER [127], Zhang et al. [159], PUMA [17], Wang et al. [126], PenetrativeAI [148], Huang et al. [44], Park et al. [87], MetaGPT [40], OKR-Agent [169]
From RAG to Agent | Personalized Generation | Alignment with User Fact | Character-LLM [110], Wang et al. [135], Dai et al. [27]
From RAG to Agent | Personalized Generation | Alignment with User Preferences | Wang et al. [139], Ran et al. [96], Wang et al. [134], Chen et al. [18]
", + "bbox": [ + 178, + 138, + 879, + 500 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- User Historical Content: Implicit personalization derived from user-generated content, such as chat history, emails, reviews, and social media interactions.", + "- Persona-Based User Simulation: The use of LLM-based agents to simulate and generate personalized interactions." + ], + "bbox": [ + 174, + 530, + 879, + 578 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Integrating this personalized information at various stages of the RAG and agent workflows enables dynamic alignment with human preferences, thereby making responses more user-centric and adaptive.", + "bbox": [ + 174, + 587, + 879, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 HOW TO ADOPT PERSONALIZATION", + "text_level": 1, + "bbox": [ + 174, + 637, + 460, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We define the process of introducing personalization within the RAG pipeline as follows:", + "bbox": [ + 174, + 659, + 710, + 672 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ng = \\mathcal {G} (\\mathcal {R} (Q (q, p), C, p), \\text {p r o m p t}, p, \\theta) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 684, + 880, + 698 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $p$ denotes personalized information, and the process unfolds in three steps. In the pre-retrieval phase, query processing $(Q)$ refines the query $q$ using personalized information, such as through query rewriting or expansion. During the retrieval phase, the retriever $(\\mathcal{R})$ leverages $p$ to fetch relevant documents from the corpus $(C)$ . Finally, in the generation phase, the retrieved information, combined with $p$ and structured using the given prompt, id fed into the generator $(\\mathcal{G})$ with parameter $\\theta$ to produce the final response $g$ . It is evident that personalized information directly influences multiple stages of the RAG pipeline. In this survey, we consider the agent system as a specialized application of the RAG framework, where personalization is incorporated in a manner similar to the RAG framework.", + "bbox": [ + 174, + 708, + 880, + 825 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 879, + 101 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 524, + 830, + 532, + 839 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8f9e5a0db9555c1a4ed96186bb98667b2c4fd2abae37751b9dee8a95b60f65c1.jpg", + "image_caption": [ + "Fig. 2. Overview of the personalized pre-retrieval stage." + ], + "image_footnote": [], + "bbox": [ + 114, + 125, + 816, + 383 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 WHERE TO ADOPT PERSONALIZATION", + "text_level": 1, + "bbox": [ + 114, + 425, + 415, + 438 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Pre-retrieval", + "text_level": 1, + "bbox": [ + 114, + 446, + 238, + 459 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.1 Definition. 
Pre-retrieval is a crucial step in information retrieval systems, where the original user query is enhanced or modified before the retrieval process to improve the relevance and quality of the search results, as shown in Figure 2. This process often incorporates additional contextual or personalized information to better align the query with the user's intent. The process can be formalized as follows:", + "bbox": [ + 112, + 469, + 821, + 535 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nq ^ {*} = Q (q, p) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 545, + 821, + 560 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $p$ and $q$ denote the personalized information and original query, and $q^{*}$ is the optimized query after query reformulation.", + "bbox": [ + 112, + 570, + 820, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1.2 Query Rewriting. Query rewriting in RAG at the pre-retrieval stage refers to the process of reformulating user queries to enhance retrieval effectiveness by improving relevance, disambiguating intent, or incorporating contextual information before retrieving documents from an external knowledge source. The literature on personalized query rewriting can be broadly classified into two primary categories: (1) Direct Personalized Query Rewriting and (2) Auxiliary Personalized Query Rewriting.", + "bbox": [ + 112, + 614, + 821, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(1). Direct Personalized Query Rewriting. The first category focuses on personalized query rewriting by using direct models. For example, Cho et al. [25] presents a personalized search-based query rewrite system for conversational AI that addresses user-specific semantic and phonetic errors. Nguyen et al. [82] apply reinforcement learning techniques to improve query rewriting in online e-commerce systems, leveraging distilled LLMs for personalized performance. CLE-QR [60] explores query rewriting in Taobao's search engine to enhance user satisfaction through customized query adaptation. CGF [38] introduces a constrained generation framework that allows for more flexible and personalized query rewriting in conversational AI. Li et al. [57] investigate learning methods to rewrite prompts for personalized", + "bbox": [ + 112, + 709, + 821, + 827 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 464, + 830, + 470, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "text generation, improving the relevance and engagement of AI-generated content. Additionally, PEARL [80] discusses personalizing large language model-based writing assistants through the integration of generation-calibrated retrievers, enhancing AI-generated content.", + "bbox": [ + 174, + 121, + 880, + 170 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(2). Auxiliary Personalized Query Rewriting. The second category emphasizes personalized query rewriting by using auxiliary mechanisms, such as retrieval, reasoning strategies, and external memory. Zhou et al. 
[173] propose a least-to-most prompting strategy that aids in complex reasoning within LLMs, which can be adapted for personalized text generation. ERAGent [112] enhance retrieval-augmented LLMs to improve personalization, efficiency, and accuracy, indirectly supporting personalized query rewriting for content generation. CoPS [174] integrate LLMs with memory mechanisms to create more personalized search experiences, which also influences content generation through better query understanding. Further, Agent4Ranking [61] employs multi-agent LLMs to perform semantic robust ranking, including personalized query rewriting to improve search rankings. FIG [22] combine graph-based methods with LLMs to query rewrite, improving personalized content generation and conversational interactions. Lastly, BASES [99] employ LLM-based agents to simulate large-scale web search user interactions, contributing to the development of personalized query rewriting strategies for content generation.", + "bbox": [ + 174, + 185, + 883, + 373 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.3 Query Expansion. Query expansion enhances retrieval systems by expanding a user's original query with additional terms, synonyms, or refined structure to better capture intent. This improves the relevance and scope of retrieved documents. Recent advancements in LLMs have reinvigorated this field [46, 48, 132], leveraging their comprehension and generation abilities to expand queries using encoded knowledge or external retrieval, with notable success. Personalized query expansion, a subset, incorporates user-specific data to tailor results, boosting performance and customizing the search experience.", + "bbox": [ + 174, + 388, + 883, + 491 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1). Tagging-based Query Expansion. By 2009, studies began incorporating tagging information to enhance personalized query expansion. For instance, Gossle [10] introduced the TagMap and TagRank algorithms, which dynamically selected tags from personalized networks constructed using the cosine similarity of user-item tag distances, improving recall performance. Similarly, Biancalana and Micarelli [12] recorded user queries and visited URLs, leveraging social bookmarking to extract relevant tags and build a personalized three-dimensional co-occurrence matrix. Based on this, multiple semantically categorized expanded queries were generated to better reflect user interests. Further advancements include SoQuES [15], which integrated tag semantic similarity with social proximity, and a graph-based approach [172] that utilized Tag-Topic models and pseudo-relevance feedback for term weighting, tailoring the expansion process to individual user preferences.", + "(2). Else. Apart from tagging-based techniques, early research on Personalized Query Expansion primarily focused on modeling user personalization based on search history [66], social networks, or preferences derived from friendship networks [9]. The Axiomatic PQEC framework [79] formalized expansion rules using both local (user behavior-driven) and social (network-driven) strategies. In 2017, WE-LM [144] advanced this paradigm by modeling multi-relational networks with word embeddings across tag-word relationships, refining associations through affinity graphs. 
Later, PSQE [14] further improved tagging-based methods using utf-iuf user profiling, integrating a tag similarity graph with user profiles in the online phase to compute expansion terms relevant to user interests in real-time, achieving dynamic personalized expansion. In addition, PQEWC [7] leveraged clustering and contextual word embeddings to optimize query expansions dynamically." + ], + "bbox": [ + 174, + 505, + 883, + 827 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 879, + 101 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 524, + 830, + 532, + 840 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/431fadc2fb6b837e6448de02affe85f7a85af23cf964b4cebff641487aab910d.jpg", + "image_caption": [ + "Fig. 3. Overview of the personalized retrieval stage." + ], + "image_footnote": [], + "bbox": [ + 117, + 117, + 821, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1.4 Others. Besides query rewriting and query expansion, other personalized query-related research focuses on areas like query disambiguation and query auto-completion [116]. Bobo [33] allows users to input contextual terms reflecting their domain knowledge. In 2019, a method [52] applied fastText embeddings from recent queries to rank candidates. In addition, PSQE [8] employed synthetic user profiles from Wikipedia and word2vec embeddings for query disambiguation.", + "bbox": [ + 114, + 332, + 821, + 416 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1.5 Discussion. While both query rewriting and query expansion aim to align user input with system understanding to enhance retrieval quality, their roles in personalization differ in fundamental ways. Understanding the distinct operational characteristics and application scenarios of each technique is essential for designing effective personalized retrieval systems. The key takeaways are listed as follows:", + "bbox": [ + 114, + 430, + 821, + 496 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Query rewriting is most beneficial when the original query is ambiguous, underspecified, or misaligned with retrieval intents, particularly in conversational or multi-turn settings.", + "- Query expansion is most effective when the original query is relevant but incomplete - i.e., when it needs to be semantically broadened to cover additional relevant concepts." + ], + "bbox": [ + 114, + 506, + 818, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Retrieval", + "text_level": 1, + "bbox": [ + 114, + 590, + 215, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.1 Definition. The retrieval process involves finding the most relevant documents $D^{*}$ from a corpus $C$ based on a query $q^{*}$ , as shown in Figure 3. To incorporate personalization, additional user-specific information $p$ is integrated into the retrieval function $\\mathcal{R}$ . 
This allows the retrieval process to tailor the selected documents to align with individual user preferences or contexts, thereby enhancing the relevance and personalization of the generated outputs.", + "bbox": [ + 114, + 612, + 818, + 679 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nD ^ {*} = \\mathcal {R} (q ^ {*}, C, p) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 688, + 821, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the retrieval process, personalization can primarily be introduced by focusing on three steps: indexing, retrieval, and post-retrieval. These steps ensure efficient and accurate retrieval of relevant documents or knowledge, while tailoring the process to individual user preferences. Below, we provide a detailed explanation of each step.", + "bbox": [ + 114, + 714, + 818, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.2 Indexing. Indexing organizes knowledge base data into a structured format to facilitate efficient retrieval. Within the RAG pipeline, documents are either chunked or entirely encoded into representations before being integrated into searchable systems [30, 117]. Conventional encoding methods employ either sparse encoding techniques (e.g.,", + "bbox": [ + 114, + 777, + 821, + 825 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 462, + 830, + 472, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "TF-IDF [95], BM25 [102]) or dense encoding approaches leveraging pre-trained models, such as BERT [1], Siamese Encoders [98], or LLM-based encoders [64, 147].", + "bbox": [ + 174, + 121, + 880, + 152 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To introduce personalization at the indexing stage, PEARL [80] generates user embeddings by encoding personal history data with models like DeBERTa. These embeddings are subsequently clustered to create personalized shared indices. Other approaches integrate knowledge graphs into indexing to enhance retrieval performance. For example, KG-R retriever [21] employs a Hierarchical Index Graph, consisting of a knowledge graph layer and a collaborative document layer, to improve RAG retrieval. EMG-RAG [137] incorporates personalized memory within an editable knowledge graph, enabling dynamic retrieval. Similarly, PGraphRAG [5] leverages user-centric knowledge graphs to enhance personalization in retrieval tasks.", + "bbox": [ + 174, + 156, + 883, + 273 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.3 Retrieval. The Retrieval step matches a user query with the indexed knowledge base to fetch relevant candidates. It can be broadly categorized into four different types: (1) Dense Retrieval, (2) Sparse Retrieval, (3) Prompt-based Retrieval, and (4) Others.", + "bbox": [ + 174, + 287, + 883, + 337 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(1). Dense Retrieval. Dense retrieval methods often use vector embeddings and similarity metrics (e.g., cosine similarity) and achieve personalization by encoding user preferences, context, or interactions into query or document embeddings, enabling tailored results through similarity-based matching. 
For instance, MeMemo [138] retrieves personalized information by matching user-specific embeddings with document vectors, focusing on private, on-device text generation. Similarly, RECAP [71] and LAPDOG [43] enhance personalized dialogue generation by encoding queries and user profiles as dense vectors and retrieving top-N results, ensuring user-specific context drives the responses. In chatbots, Gu et al. [37] integrates conversational context and user profiles to align retrieved responses with user personas. PersonalM [77] employs group-wise contrastive learning, training its retrieval model to align user queries with domain-specific text fragments, thereby improving personalization. UIA [155] employs dual encoders to retrieve documents tailored to user preferences. XPERT [125] incorporates temporal events and user interactions into embeddings, enabling large-scale retrieval across millions of items.", + "bbox": [ + 174, + 351, + 883, + 537 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Dense retrieval also enhances specific applications like e-commerce, medical assistance, and language models. DPSR [157] and RTM [11] encode user queries and product information to personalize product searches dynamically. Pearl [80] and MemPrompt [74] retrieve personalized content by leveraging historical user data and memory-assisted mechanisms. EERRA [23] uses review embeddings as dense queries for recommendations. In medical assistance, MALP [160] and User-LLM [84] integrate short- and long-term user interactions into embeddings for contextualized, personalized responses. Finally, PER-PCS [120] retrieves relevant information using individual user histories, enhancing the personalization capabilities of large language models.", + "bbox": [ + 174, + 541, + 885, + 659 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(2). Sparse Retrieval. Sparse retrieval methods often rely on term-based matching (e.g., BM25) and apply personalization by assigning higher weights to terms or keywords that are more relevant to the user. OPPU [121] uses the BM25 algorithm to select the k most relevant records from the user's historical data for the current query. Similarly, PAG [101] incorporates user input and profiles to enhance summarization and retrieval, aligning sparse representations with personalization objectives for large language models. Au et al. [5] uses BM25 search algorithms to find entries related to the target user or neighboring users through the graph structure. UniMS-RAG [128] combines sparse and dense retrieval by leveraging multi-source knowledge, such as dialogue context and user images, to refine personalized responses in dialogue systems. Lastly, Deng et al. [29] apply sparse retrieval to support fact-based queries, considering user queries and preferences to enhance answer generation for e-commerce applications.", + "bbox": [ + 174, + 672, + 885, + 827 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 879, + 101 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 524, + 830, + 534, + 840 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(3). Prompt-based Retrieval. 
Prompt-based retrieval leverages prompts to guide retrieval from the model or external sources and introduces personalization by crafting user-specific prompts that guide the retrieval process. These prompts may include explicit user preferences, historical interactions, or detailed instructions that reflect the user's unique requirements. By embedding this personalized context directly into the prompt, the retrieval process can dynamically adjust to capture and return results that are most relevant to the user. LAPS [50] focuses on multi-session conversational search by storing user preferences and dialogue context, then using prompts to retrieve relevant information tailored to the user's biases and categories of interest. UniMP [140] employs user interaction histories as input to prompt-based retrieval, enabling personalized recommendations for multi-modal tasks, such as vision-language applications, by aligning prompts with user behavioral data. In contrast, Shen et al. [111] explores the use of LLMs to extract empathy and narrative styles from user-provided stories, but this work primarily focuses on style extraction and does not explicitly involve a retrieval component.", + "(4). Others. Reinforcement learning-based retrieval personalizes the process by optimizing retrieval policies based on user feedback, learning user preferences over time to adjust strategies. Salemi et al. [103] combines models like BM25, RbR, and dense retrieval, refining them with reinforcement learning (RL) and knowledge distillation (KD) to adapt to user profiles for personalized outputs. Parameter-based retrieval leverages pre-trained model parameters to implicitly store and retrieve user-specific information, allowing direct retrieval from the model without traditional indices. PersonalTM [65] generates document identifiers (Document IDs) using a Transformer model, encoding query, history, and document relationships into its parameters for personalization. Similarly, Zhang et al. [165] uses parameterized representations to integrate user queries and histories, tailoring responses to individual preferences." + ], + "bbox": [ + 114, + 121, + 823, + 457 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2.4 Post-retrieval. Current Post-Retrieval methods primarily focus on refining retrieved documents or responses to improve relevance and coherence, current methodologies could be categorized into three parts (1) Re-ranking, (2) Summarization, and (3) Compression.", + "bbox": [ + 114, + 470, + 820, + 518 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1). Re-ranking. Re-ranking enhances personalized content generation by prioritizing more relevant documents at the top. PersonaRAG [156] extends RAG by integrating user-centric agents, such as the Live Session Agent and the Document Ranking Agent, to refine document ranking and improve overall performance. Pavliukevich et al. [89] propose a cross-encoder BERT model for re-ranking external knowledge within a personalized context. UniMS-RAG [128] introduces a scoring mechanism that evaluates retrieved documents and outputs by optimizing the retriever. Besides, it includes an evidence attention mask, enabling re-ranking during inference and applying it to personalized datasets. Salemi and Zamani [106] present an iterative approach to optimizing ranking results based on the expectation-maximization algorithm, with performance validated in personalized scenarios.", + "(2). Summarization. 
Summarization refers to the process of summarizing retrieved information to enhance performance. For instance, Zhang et al. [164] introduced a role-playing agent system to summarize retrieved history in order to improve the final Personalized Opinion Summarization process.", + "(3). Compression. Compression involves condensing embeddings or retrieved content to enhance efficiency and effectiveness. Approaches like AutoCompressor [24] compress contextual embeddings into shorter semantic representations, and FIT-RAG [76] introduces a self-knowledge recognizer along with a sub-document-level token reduction mechanism to minimize tokens within RAG pipeline. However, few studies have specifically explored personalized fields, highlighting a promising direction for future research." + ], + "bbox": [ + 114, + 532, + 823, + 825 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 464, + 830, + 472, + 839 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2.5 Discussion. Indexing, retrieval, and post-retrieval methods each play a critical role in ensuring efficient and personalized information processing, with specific applications and trade-offs. Indexing focuses on organizing knowledge bases for efficient retrieval, using techniques such as sparse encoding methods like TF-IDF and BM25, which are efficient but limited in understanding semantics, and dense encoding methods like BERT and DeBERTa, which provide better semantic understanding but require significant computational resources. These methods are widely used in tasks like question answering and personalized recommendation systems. Retrieval involves matching user queries with relevant documents and can be categorized into dense retrieval, which provides high semantic understanding and personalization but is computationally expensive; sparse retrieval, which is efficient and interpretable but less capable of handling semantics; prompt-based retrieval, which is highly flexible and adaptable to user needs but requires careful engineering of prompts; and advanced methods like reinforcement learning-based approaches, which dynamically adapt to user feedback but are complex to implement. This step is essential in applications like personalized dialogue systems, search engines, and e-commerce. Post-retrieval methods refine retrieved results to enhance relevance and coherence through re-ranking, which improves personalization and prioritizes relevant content but increases computational overhead; summarization, which simplifies complex information for better user understanding but risks losing critical details; and compression, which reduces computational costs by condensing information but remains underexplored in personalized contexts. 
Together, these methods provide a comprehensive pipeline for delivering efficient, relevant, and personalized outputs, balancing their strengths in semantic understanding, relevance, and flexibility with challenges related to computational costs and implementation complexity.", + "bbox": [ + 174, + 121, + 883, + 431 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3 Generation", + "text_level": 1, + "bbox": [ + 174, + 446, + 289, + 458 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3.1 Definition. Personalized generation incorporates user-specific retrieved documents $D^{*}$ , task-specific prompt prompt, and user preference information $p$ via the generator $\\mathcal{G}$ parameterized by $\\theta$ to produce tailored content $g^{*}$ aligned with individual preference, where the flow is shown in Figure 4. The generation process can be formulated as", + "bbox": [ + 174, + 467, + 883, + 517 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\ng ^ {*} = \\mathcal {G} (D ^ {*}, \\text{prompt}, p, \\theta). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 526, + 880, + 542 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Personalized generation can be achieved by incorporating explicit and implicit preferences. Explicit preference-driven methodologies utilize direct input signals (e.g., $D^{*}$ , prompt, and $p$ ), to tailor outputs to specific user preferences. Conversely, implicit preference-encoded approaches embed personalized information within the parameters $\\theta$ of the generator model, during training, thereby facilitating preference alignment without the necessity for explicit runtime inputs.", + "bbox": [ + 174, + 551, + 883, + 635 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3.2 Generation from Explicit Preferences. Integrating explicit preferences into LLMs facilitates personalized content generation. Explicit preference information encompasses user demographic information (e.g., age, occupation, gender, location), user behavior sequences (reflecting historical behavioral patterns), and user historical output texts (capturing writing style and tone preferences). The injection of explicit preferences for personalized generation can be categorized into three types: (1) Direct-integrated Prompting, (2) Summary-augmented Prompting, and (3) Adaptive Prompting.", + "bbox": [ + 174, + 647, + 883, + 750 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(1). Direct-integrated Prompting. Integrating user explicit preferences into language models through prompting enables the prediction of users' intent and behavioral patterns, facilitating personalized content generation. For instance, $\\mathrm{P}^2$ [49], Character Profiling [154], and OpinionQA [107] integrate personalized data into LLMs through prompting for role-playing task, thereby aligning the model's responses with specified user profiles. Kang et al. [51] and Liu et al. [67]", + "bbox": [ + 174, + 761, + 883, + 828 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "X. Li and P. 
Jia, et al.", + "bbox": [ + 779, + 90, + 879, + 101 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 522, + 830, + 535, + 840 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/684289db43c065be977860260a5ca5599fa5f676c9663e41befff2d9bcc9c089.jpg", + "image_caption": [ + "Fig. 4. Overview of the personalized generation stage." + ], + "image_footnote": [], + "bbox": [ + 135, + 119, + 802, + 292 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "integrate interaction histories into LLMs via prompting to predict user rating for candidate items. Cue-CoT [129] employs chain-of-thought reasoning to infer user needs from contextual cues, enabling personalized responses to in-depth dialogue questions. Additionally, TICL [26] proposes a trial-and-error framework that critiques initial LLM-generated responses, derives explanations and integrates these negative examples into prompts to improve personalization alignment.", + "bbox": [ + 114, + 349, + 821, + 434 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(2). Summary-augmented Prompting. Direct integration of personalized information via prompting struggles with ambiguous intent signals: Lengthy interaction histories introduce noise that obscures critical behavioral patterns [69], while sparse behavioral data lacks sufficient context for LLMs to derive meaningful user preferences. To address these issues, recent approaches focus on summarizing user personalized intents and integrating them into prompts. For instance, GPG [158] extracts key user habits and preferences from personal contexts, enabling fine-grained personalization. Similarly, LLMs are employed to generate task-specific summaries of user preferences, enhancing retrieval-augmented personalized generation capabilities [101]. In recommendation systems, ONCE [70], LLMTreeRec [163], and KAR [145] leverage historical user-item interactions to summarize user preferences. Furthermore, Matryoshka [58] generates user preference summaries by dynamically retrieving and synthesizing historical data.", + "(3). Adaptive Prompting. Manually designing personalized prompts demands both expert knowledge and significant labor, motivating the development of automated methods for personalized prompt generation. For example, Li et al. [57] trains a personalized prompt rewriter via supervised and reinforcement learning. RecGPT [166] and PEPLER-D [59] leverage prompt tuning to generate personalized prompts, enhancing sequential and explainable recommendations, respectively. GRAPA [94] integrates semantic and collaborative signals from user-item interaction graphs with graph neural networks to generate context-aware personalized prompts. SGPT [28] employs prompt tuning to jointly model common and group-specific patterns, bridging generalized and personalized federated learning paradigms. Furthermore, PFCL [152] achieves multi-granularity human preference modeling: coarse-grained prompts distill shared knowledge, while fine-grained prompts adapt to individual user characteristics." + ], + "bbox": [ + 112, + 446, + 823, + 765 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3.3 Generation from Implicit Preferences. Unlike explicit preference modeling, which captures user preferences through textual input, implicit preference-based methods incorporate personalization through internal parameters. 
This personalization is achieved either through Parameter-Efficient Fine-tuning (PEFT) techniques, such as LoRA [42],", + "bbox": [ + 114, + 777, + 823, + 827 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "or reinforcement learning-based approaches for preference alignment [20, 57]. Based on these strategies, we classify existing methods into two categories: (1) Fine-tuning-Based Methods and (2) Reinforcement Learning-Based Methods.", + "bbox": [ + 174, + 121, + 883, + 154 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(1). Fine-tuning Based Methods. For fine-tuning methods, LoRA is the most widely adopted since it is resource-efficient and enables rapid adaptation without compromising model performance. PLoRA [165] introduces a personalized knowledge integration framework that combines task-specific LoRA with user-specific knowledge. Similarly, LM-P [142] personalizes information via LoRA by incorporating User ID as a personalization factor. MiLP [165] employs Bayesian optimization to determine the optimal personalization injection configuration, including LoRA settings, to effectively capture and utilize user-specific information. OPPU [122] and PER-PCS [120] follow a similar approach, leveraging user history data for fine-tuning LoRA-based personalization. However, PER-PCS differs by incorporating a gating module that selects the appropriate LoRA, enabling fine-grained personalization. Additionally, Review-LLM [91] integrates LoRA for supervised fine-tuning in the task of personalized review generation.", + "bbox": [ + 174, + 166, + 883, + 319 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Beyond LoRA-based approaches, alternative pipelines have been proposed for personalized generation. UserIdentifier [78] introduces a user-specific identifier, significantly reducing training costs while enhancing personalized demonstration. UserAdapter [171] proposes user-independent prefix embeddings, leveraging prefix tuning for personalization. Meanwhile, HYDRA [175] achieves implicit personalization by training user-specific headers. Recently, researchers have also explored fine-tuning personalized model on edge devices [90] and collaborative learning between small and large language models to enable more personalized generation [161].", + "bbox": [ + 174, + 321, + 883, + 422 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(2). Reinforcement Learning Based Methods. Apart from fine-tuning based methods, recent research has explored reinforcement learning based techniques to personalize text generation by aligning outputs with user preferences. P-RLHF [62] has been proposed to jointly learn a user-specific and reward model to enable text generation that aligns with a user's styles or criteria. P-SOUPS [47] models multiple user preferences as a Multi-Objective Reinforcement Learning (MORL) problem, decomposing preferences into multiple dimensions, each trained independently. PAD [20] aligns text generation with human preferences during inference by utilizing token-level personalized rewards to guide the decoding process. 
REST-PG [104] introduces a framework that trains large language models to reason over personal data during response generation. This approach first generates reasoning paths to enhance the LLM's reasoning ability and then employs Expectation-Maximization Reinforced Self-Training to iteratively refine the model based on its high-reward outputs. Additionally, Salemi et al. [103] incorporate reinforcement learning into the RAG pipeline to improve retrieval accuracy, thereby enhancing the personalization of generated content. Other applications include RewriterSIRI [57], which has been introduced to generate text via RL-based personalized prompt rewriting using API-based LLMs, and Kulkarni et al. [54], who explore the use of reinforcement learning to optimize RAG for improving the relevance and coherence of chatbot responses in specialized domains, ultimately enhancing user satisfaction and engagement.", + "bbox": [ + 174, + 436, + 883, + 694 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3.4 Discussion. Personalized generation can be adopted via both explicit and implicit preference injection, yet they exhibit distinct characteristics that make them suitable for different scenarios. In explicit preference-based generation, personalization is clearly defined through user profile descriptions, contextual information, and similar inputs, which are incorporated into generators via prompts. A key advantage of this approach is explainability, as the personalized information is explicitly provided and easily traceable. Despite leveraging provided preferences and internal knowledge, explicit preference injection's personalization is constrained by model capabilities and irrelevant information interference. In contrast, implicit preference-based generation internalizes personalized information into", + "bbox": [ + 174, + 708, + 883, + 825 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 522, + 830, + 535, + 839 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c1f227eab52914058e52cd6428ef57f5faa6d829581489e901866e44230f1223.jpg", + "image_caption": [ + "Fig. 5. Overview of transition from personalized RAG to personalized agent." + ], + "image_footnote": [], + "bbox": [ + 122, + 118, + 816, + 489 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "the generator's parameters through scene-specific personalized data, thereby adapting the model for more fine-grained personalization. However, these methods typically incur substantial training and computational costs, as they require fine-tuning the generator's internal parameters. Therefore, selecting between these approaches should be guided by the specific application scenario and resource constraints.", + "bbox": [ + 114, + 555, + 821, + 621 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 From RAG to Agent", + "text_level": 1, + "bbox": [ + 114, + 642, + 284, + 656 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4.1 Definition. 
A personalized LLM-based agent is a system designed to dynamically incorporate user context, memory, and external tools or APIs to support highly personalized and goal-oriented interactions [19, 45, 146], and solve problems in a goal-oriented manner [63, 113]. From the previously introduced stages of RAG, we observe that the evolution of personalized RAG reveals a structural convergence with agent architectures. We analyze them from three key perspectives:", + "bbox": [ + 112, + 662, + 821, + 748 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "- Personalized Understanding: This phase within the agent parallels the query understanding and rewriting process of RAG as outlined in Section 4.1. However, it extends beyond static semantic parsing by incorporating dynamic user profiling [139] and role modeling [110]. This integration enables the agent to dynamically align interactions with implicit user preferences, facilitating personalized responses and task-specific adaptations [96].", + "bbox": [ + 114, + 758, + 821, + 825 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Personalized Planning and Execution: This phase in agents mirrors RAG's retrieval process in Section 4.2 yet it advances beyond static document retrieval by incorporating real-time memory management [87] and sophisticated tool and API calling [127]. This approach ensures the dynamic alignment of external knowledge with personalized constraints, such as integrating medical history in healthcare agents [2], to deliver context-aware and user-specific outcomes.", + "- Personalized Generation: This phase in agents mirrors RAG's generative process in Section 4.3 but transcends static template-based generation by integrating user preference and fact alignment. Agents dynamically enforce user preferences and ensure fact consistency through role-specific mechanisms (e.g., social adaptability in conversational agents [2]), enabling outputs to evolve in harmony with personalized and situational constraints rather than relying solely on predefined generative frameworks." + ], + "bbox": [ + 174, + 121, + 880, + 292 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In general we frame agent architectures as \"personalized RAG++\", where persistent memory [137] replaces static indexes, and tool APIs [17] serve as dynamic knowledge connectors, enabling complicated, human-aligned interactions beyond one-shot retrieval, as shown in Figure 5. This progression highlights that as RAG systems incorporate deeper personalization—requiring user-state tracking, adaptive tool usage, and context-aware generation, they inherently adopt agent-like capabilities.", + "bbox": [ + 174, + 300, + 880, + 383 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.4.2 Personalized Understanding. Personalized understanding refers to an agent's ability to accurately interpret user inputs by integrating user intent recognition and contextual analysis. This process ensures interactions that are both meaningful and contextually appropriate. 
The rationale behind this classification lies in its capacity to address three core aspects of understanding: recognizing user intent, analyzing context, and leveraging user profiles. Each of these aspects plays a distinct role in improving the agent's performance.", + "bbox": [ + 174, + 398, + 880, + 481 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1). User-profile Understanding. In user-profile understanding, an agent's personalized ability primarily depends on its capacity to accurately model and understand the user's preferences, context, and intentions. Xu et al. [148] proposes a framework in which LLMs are designed to understand the physical world, thereby facilitating a deeper connection between the agent and its environment, which is essential for accurate task execution. Abbasian et al. [2] further expands this understanding by emphasizing the importance of personalization in health agents, where the user's profile directly influences the behavior and decisions of the agent. This user understanding is foundational to ensuring that the AI agent performs tasks in a way that aligns with individual user needs.", + "(2). Role Understanding. In agent's role understanding, the role of the agent within these environments is also crucial. Recent studies focus on enhancing role-playing capabilities within LLMs. Wang et al. [139] introduce RoleLLM, a benchmark that aims to elicit and refine the role-playing abilities of LLMs, demonstrating how role understanding influences agent performance in conversational tasks. Similarly, Shao et al. [110] present Character-LLM, a trainable agent framework for role-playing, which tailors its responses based on predefined roles. Wang et al. [134] introduce a method for evaluating personality fidelity in role-playing agents through psychological interviews, aiming to enhance the realism and consistency of AI-driven characters. This role understanding allows for more contextually appropriate interactions, increasing the relevance and utility of AI agents across various applications.", + "(3). User-role Joint Understanding. In agent's user-role joint understanding, the intersection of user and role understanding is explored through frameworks that evaluate and enhance the social and personality aspects of LLMs. SocialBench Chen et al. [18] provides a sociality evaluation framework for role-playing agents. Dai et al. [27], and" + ], + "bbox": [ + 174, + 494, + 880, + 825 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 522, + 830, + 535, + 840 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "[96] extend this by incorporating multi-modal data and personality-indicative information, respectively, which allows agents to better adapt to both user and role understanding in dynamic environments. Furthermore, Wang et al. [126] offers a perspective on how role and environment understanding can improve user experience. Tu et al. [123] contribute by providing a benchmark specifically for evaluating role-playing agents in the Chinese context, adding a cultural dimension to role understanding. 
Finally, Neeko [153] further advances role-based interactions.", + "bbox": [ + 114, + 121, + 821, + 204 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.4.3 Personalized Planning and Execution. Personalized planning and execution refer to the process of designing and implementing strategies or actions that are specifically tailored to an individual's unique context, and goals [44, 87, 114, 159]. It requires agents to dynamically integrate long-term memory, real-time reasoning, and external tool utilization [40, 41, 169], as demonstrated in healthcare decision support [2] and travel planning scenarios [17]. We analyze two fundamental components that enable this personalization in the following.", + "bbox": [ + 114, + 220, + 823, + 305 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1). Memory Management. Effective memory systems allow agents to integrate users' historical preferences, behavioral patterns, and contextual habits, enhancing their ability to make planning and tailor interactions to user-specific needs [17, 127, 136]. The EMG-RAG framework [137] combines editable memory graphs with retrieval-augmented generation to maintain dynamic user profiles, while Park et al. [87] implements memory streams and periodic reflection mechanisms to simulate human-like behavior. In healthcare applications, Abbasian et al. [2] integrates multimodal user data through specialized memory modules to optimize treatment recommendations. For recommendation systems, RecAgent [133] employs hierarchical memory structures to model user interaction patterns across multiple domains. Recent advances like TravelPlanner+ [114] demonstrate how memory-augmented LLMs achieve higher relevance in personalized itinerary generation compared to generic planners.", + "(2). Tool and API Calling. The integration of external tools expands agents' capabilities beyond pure linguistic reasoning, enabling agents to interact with users and perform personalized tasks [17, 126, 127, 148, 159]. For instance, VOYAGER [127] establishes a paradigm for lifelong skill acquisition through automatic API curriculum learning and skill library construction. In robotics, Zhang et al. [159] develops a bootstrapping framework where LLMs guide robots in tool-mediated skill discovery, enabling a high success rate in novel object manipulation tasks. The PUMA framework [17] demonstrates how personalized web agents can achieve performance gains in e-commerce tasks through adaptive API orchestration. For mobile interaction, Wang et al. [126] implements few-shot tool learning to handle diverse UI operations with minimal training data. These approaches highlight the importance of tool grounding mechanisms [44] that translate linguistic plans into executable API sequences while maintaining personalization constraints." + ], + "bbox": [ + 114, + 319, + 823, + 641 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This synthesis highlights that modern agent systems achieve enhanced personalization through two primary strategies: 1) Memory-augmented architectures, which leverage editable memory graphs [137], reflection mechanisms [87], and hierarchical memory structures [133] to dynamically adapt to user preferences across various domains; and 2) Tool and API integration, which expand agent capabilities by balancing generalization with specialization. 
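As a minimal illustration of how memory management and tool calling combine in a personalized planning loop, the sketch below pairs a toy per-user memory store with a single external tool call; `UserMemory`, `weather_api`, and `plan_trip` are hypothetical names and do not correspond to EMG-RAG [137], VOYAGER [127], or any other cited system.

```python
from dataclasses import dataclass, field

@dataclass
class UserMemory:
    """Toy per-user memory store; real agents use richer structures such as memory graphs."""
    facts: dict = field(default_factory=dict)

    def recall(self, key, default=None):
        return self.facts.get(key, default)

    def write(self, key, value):
        self.facts[key] = value

def weather_api(city: str) -> str:
    # Hypothetical external tool; a deployed agent would call a real API here.
    return f"mild and sunny in {city}"

def plan_trip(memory: UserMemory, request: str) -> str:
    # 1) Memory recall: ground the plan in stored user-specific facts.
    city = memory.recall("home_city", "an unspecified city")
    interests = memory.recall("interests", [])
    # 2) Tool calling: fetch up-to-date information the memory cannot provide.
    forecast = weather_api(city)
    # 3) Personalized output: combine both sources into the response.
    focus = ", ".join(interests) if interests else "general sightseeing"
    return f"Plan for '{request}': start from {city} ({forecast}); prioritize {focus}."

if __name__ == "__main__":
    mem = UserMemory()
    mem.write("home_city", "Hong Kong")
    mem.write("interests", ["hiking", "museums"])
    print(plan_trip(mem, "a free weekend"))
```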
Future work may explore improving the contextual relevance and adaptability of memory systems while optimizing real-time tool interaction for seamless task execution.", + "bbox": [ + 114, + 643, + 823, + 743 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.4.4 Personalized Generation. Based on the foundation of personalized planning and execution mechanisms, which enable agents to adapt strategies to user-specific contexts [44, 159], the next critical concern lies in personalized generation. This capability ensures that generated outputs not only align with factual correctness but also resonate with users' unique preferences, personality traits, and situational needs. Personalized generation bridges the gap between", + "bbox": [ + 114, + 758, + 823, + 825 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 346, + 102 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "adaptive reasoning and human-aligned outcomes, allowing agents to produce contextually relevant and emotionally appropriate responses.", + "bbox": [ + 174, + 121, + 880, + 154 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1). Alignment with User Fact. Alignment with User Fact emphasizes the accuracy, consistency, and factual grounding of personalized responses, ensuring they remain trustworthy across diverse user interactions. This is particularly challenging in personalized agents, where maintaining character authenticity while avoiding hallucinations requires balancing creativity with factual adherence. Recent advances address these challenges through improved training frameworks and evaluation metrics. For instance, Character-LLM [110] integrates memory-augmented architectures to reduce hallucinations while preserving character-specific traits. Wang et al. [135] investigate quantization effects on personality consistency in edge-deployed agents and stabilize outputs under computational constraints. Dai et al. [27] ensures multimodal consistency (text-image) in role-playing. These works highlight the importance of architectural innovations and rigorous evaluation in achieving reliability.", + "(2). Alignment with User Preferences. Alignment with user preferences ensures that generated outputs reflect individualized personalities, values, and interaction styles. This requires agents to dynamically interpret implicit user cues and adapt responses accordingly. Wang et al. [139] benchmarks role-specific alignment. Ran et al. [96] improves personality fidelity via psychological scale datasets. Wang et al. [134] quantifies alignment via psychological interviews. Chen et al. [18] evaluates social adaptability in conversations." + ], + "bbox": [ + 174, + 176, + 885, + 439 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.4.5 Discussion. 
The architectural evolution from RAG to personalized agents introduces significant advancements in human-AI interaction but also surfaces critical challenges that warrant further investigation.", + "bbox": [ + 174, + 460, + 880, + 492 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Personalized Understanding, while enabling interpretation of user intent and context, faces limitations in real-time adaptability and generalization. Current approaches like RoleLLM [139] and Character-LLM [110] demonstrate robust role-specific comprehension but struggle with dynamic user state tracking, particularly when handling evolving preferences or multi-session interactions. Furthermore, cultural specificity in benchmarks like CharacterEval [123] reveals gaps in global applicability, as agents trained on region-specific data often fail to generalize across diverse sociocultural contexts. Future work could explore hybrid architectures that combine continuous learning mechanisms with privacy-preserving federated learning to address these adaptability constraints while maintaining user trust.", + "bbox": [ + 174, + 494, + 883, + 613 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Personalized Planning and Execution, achieves remarkable task specialization through memory management and tool integration, yet suffers from scalability issues in complex environments. While frameworks like EMG-RAG [137] and VOYAGER [127] effectively manage user-specific constraints, their reliance on predefined API taxonomies limits emergent tool discovery in novel scenarios. The \"cold-start\" problem persists in domains requiring rapid skill acquisition, as seen in healthcare applications [2], where delayed API responses can compromise decision-making efficacy. A promising direction involves developing meta-reasoning architectures that dynamically prioritize memory recall versus tool invocation based on situational urgency and confidence thresholds.", + "bbox": [ + 174, + 617, + 883, + 736 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Personalized Generation balances factual accuracy with preference alignment but risks over-fitting, where excessive finetuning to user profiles may reinforce cognitive biases. Techniques address surface-level alignment but lack mechanisms for ethical boundary detection. For instance, agents might inadvertently propagate harmful stereotypes when mirroring user preferences without critical oversight. Future systems could integrate value-aligned reinforcement learning with human-in-the-loop validation to preserve authenticity while preventing detrimental customization.", + "bbox": [ + 174, + 738, + 883, + 823 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 522, + 830, + 535, + 840 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/d86abcf5e5ec9f4d1a1e2cf178e6e2a2411500ea7111c2031609d3b0586d7569.jpg", + "table_caption": [ + "Table 2. Datasets and metrics for personalized RAG and Agent." + ], + "table_footnote": [], + "table_body": "
Field | Metrics Category | Metrics | Datasets
Pre-retrieval | Textual Quality | BLEU, ROUGE, EM | Avocado Research Email Collection [57, 85], Amazon review [57, 83], Reddit comments [57, 118], Amazon ESCI dataset [82, 97], PIP
Pre-retrieval | Information Retrieval | MAP, MRR, NDCG, Precision, Recall, RBP | AOL [88, 174], WARRIORS [99], Personalized Results Re-Ranking benchmark [6], delicio.us [9, 15, 144, 172], Flickr [9, 108], CiteULike [10, 14], LRDP [12], Delicious [141], Bibsonomy [79], Wikipedia [8, 33]
Pre-retrieval | Classification | Accuracy, Macro-F1 | SCAN [56, 173], AITA WORKSM [53, 80], Robust04 [61]
Pre-retrieval | Others | XEntropy, PMS, Image-Align, PQEC, ProfOverlap | Amazon ESCI dataset [82, 97], PIP, Bibsonomy [79]
Retrieval | Textual Quality | BLEU, ROUGE, Dis, PPL | TOPDIAL [130], Pchatbot [93], DuLemon [150]
Retrieval | Information Retrieval | Recall, MRR, Precision, F1 | LiveChat [34], Pchatbot [93], DuLemon [150]
Retrieval | Classification | Accuracy, Succ | TOPDIAL [130], PersonalityEvid [119], DuLemon [150], PersonalityEdit [75]
Retrieval | Others | Fluency, Coherence, Plausibility, ES, DD, TPEI, PAE | PersonalityEvid [119], PersonalityEdit [75]
Generation | Textual Quality | BLEU, ROUGE, Dis, PPL, METEOR | LaMP [105], LongLaMP [55], DuLemon [150], PGraphRAG [5], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162]
Generation | Classification | Accuracy, F1, Persona F1 | LaMP [105], LongLaMP [55], DuLemon [150], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162]
Generation | Regression | MAE, RMSE | LaMP [105], LongLaMP [55], PGraphRAG [5]
Generation | Others | Fluency, Mean Success Rate, Median Relative Improvements | Personalized-Gen [3]
Agent | Textual Quality | BLEU, ROUGE, METEOR, CIDEr, EM, Fluency, Coherence, Instruction Adherence, Consistency-related metrics | RICO [126], RoleBench [139], Shao et al. [110], SocialBench [18], MMRole-Data [27], ROLEPERSONALITY [96], ChatHaruhi [134], Character-LLM-Data [153], Knowledge Behind Persona [41], Wang et al. [137], Wang et al. [135], Zheng et al. [169]
Agent | Information Retrieval | Recall, F1, Precision | Knowledge Behind Persona [41]
Agent | Classification | Accuracy, Failure Rate, Classification Accuracy, Preference Rate, Correctness | MIT-BIH Arrhythmia Database [148], VirtualHome [44], SocialBench [18], ARC [100], AGIEval [100], HellaSwag [100], MedMCQA [100], AQUA-RAT [100], LogiQA [100], LSAT-AR [100], LSAT-LR [100], LSAT-RC [100], SAT-English [100], SAT-Math [100], PersonalWAB [17], TravelPlanner+ [114]
Agent | Others | Pass@k, Executability, Productivity, Plausibility of the Story | Hong et al. [40], Zheng et al. [169]
", + "bbox": [ + 119, + 138, + 823, + 488 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5 EVALUATION AND DATASET", + "text_level": 1, + "bbox": [ + 114, + 510, + 338, + 522 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In the evolving landscape of personalization, from RAG to advanced Agent-based systems, the evaluation of models relies heavily on diverse datasets and metrics tailored to specific tasks. This survey categorizes metrics into several key types: Textual Quality metrics (e.g., BLEU, ROUGE, METEOR) assess the fluency and coherence of generated outputs; Information Retrieval metrics (e.g., MAP, MRR, Recall) evaluate the accuracy and relevance of retrieved information; Classification metrics (e.g., Accuracy, F1) measure task-specific correctness; Regression metrics (e.g., MAE, RMSE) quantify prediction errors; and Other metrics (e.g., Fluency, Pass@k) address domain-specific or task-unique aspects like plausibility or executability. These metrics span pre-retrieval, retrieval, generation, and agent-based personalization approaches, reflecting their varied objectives. To provide a comprehensive overview, we compile an extensive list of datasets across these fields, as detailed in Table 2. These datasets, paired with their respective metrics, enable researchers to benchmark and refine personalized systems, from enhancing query rewriting to enabling autonomous agents in physical and virtual environments.", + "bbox": [ + 114, + 531, + 821, + 719 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "6 CHALLENGES AND FUTURE DIRECTIONS", + "text_level": 1, + "bbox": [ + 114, + 734, + 429, + 748 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Personalized RAG and agent-based systems still face several critical challenges that warrant further exploration. We list them as follows:", + "bbox": [ + 114, + 756, + 818, + 787 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Balancing Personalization and Scalability: Integrating personalization data (such as preferences, history, and contextual signals) into RAG processes often increases computational complexity, making it difficult to maintain", + "bbox": [ + 114, + 795, + 818, + 825 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 116, + 90, + 346, + 102 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 818, + 101 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "efficiency and scalability across large-scale systems. Future work could explore lightweight, adaptive embeddings and hybrid frameworks that seamlessly fuse user profiles with real-time contexts.", + "bbox": [ + 186, + 121, + 880, + 152 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Evaluating Personalization Effectively: Current metrics like BLEU, ROUGE, and human evaluations fall short in capturing the nuanced alignment of outputs with dynamic user preferences, lacking tailored measures for personalization efficacy. 
Developing specialized benchmarks and metrics that assess long-term user satisfaction and adaptability is crucial for real-world applicability.", + "- Preserving Privacy through Device-Cloud Collaboration: Personalized retrieval often involves processing sensitive user data, raising privacy concerns, especially with the increased global emphasis on data protection regulations, such as the European Union's General Data Protection Regulation (GDPR). Consequently, a promising approach is the collaborative integration of on-device small Language models which handle sensitive personal data locally, with cloud-based LLM, which provides broader contextual knowledge.", + "- Personalized Agent Planning: Current research on agent planning remains mainly in its early stages, with much of the work focusing on building foundational frameworks such as GUI agents [81] and the application of agents across diverse domains [131]. Notably, the incorporation of personalized approaches has yet to be widely adopted. Exploring how to integrate personalized support into existing frameworks to enhance user experience represents a promising and valuable direction for future investigation.", + "- Ensuring Ethical and Coherent Systems: Bias in data processing, privacy concerns in user profiling, and coherence across retrieval and generation stages remain unresolved. Future directions should prioritize ethical safeguards, privacy-preserving techniques, and cross-stage optimization to build trustworthy, unified personalized systems." + ], + "bbox": [ + 174, + 156, + 883, + 448 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 176, + 468, + 302, + 481 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this paper, we explore the landscape of personalization from Retrieval-Augmented Generation (RAG) to advanced LLM-based Agents, detailing adaptations across pre-retrieval, retrieval, and generation stages while extending into agentic capabilities. By reviewing recent literature, datasets, and metrics, we highlight the progress and diversity in enhancing user satisfaction through tailored AI systems. However, challenges such as scalability, effective evaluation, and ethical concerns underscore the need for innovative solutions. Future research should focus on lightweight frameworks, specialized benchmarks, and privacy-preserving techniques to advance personalized AI. Relevant papers and resources are also compiled online for ease of future research.", + "bbox": [ + 174, + 489, + 883, + 608 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 176, + 630, + 272, + 642 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] 2021. BERT: a review of applications in natural language processing and understanding. arXiv preprint arXiv:2103.11943 (2021).", + "[2] Mahyar Abbasian, Iman Azimi, Amir M Rahmani, and Ramesh Jain. 2023. Conversational health agents: A personalized llm-powered agent framework. arXiv preprint arXiv:2310.02374 (2023).", + "[3] Bashar Alhafni, Vivek Kulkarni, Dhruv Kumar, and Vipul Raheja. 2024. Personalized Text Generation with Fine-Grained Linguistic Control. In Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024). 88–101.", + "[4] Amazon. [n.d.]. Amazon Customer Review Dataset. Online dataset. 
https://nijianmo.github.io/amazon/", + "[5] Steven Au, Cameron J Dimacali, Ojasmitha Pedirappagari, Namyong Park, Franck Dernoncourt, Yu Wang, Nikos Kanakaris, Hanieh Deilamsalehy, Ryan A Rossi, and Nesreen K Ahmed. 2025. Personalized Graph-Based Retrieval for Large Language Models. arXiv preprint arXiv:2501.02157 (2025).", + "[6] Elias Bassani, Pranav Kasela, Alessandro Raganato, and Gabriella Pasi. 2022. A multi-domain benchmark for personalized search evaluation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3822-3827.", + "[7] Elias Bassani, Nicola Tonellotto, and Gabriella Pasi. 2023. Personalized query expansion with contextual word embeddings. ACM Transactions on Information Systems 42, 2 (2023), 1-35.", + "[8] Oliver Baumann and Mirco Schoenfeld. 2024. PSQE: Personalized Semantic Query Expansion for user-centric query disambiguation. (2024)." + ], + "bbox": [ + 186, + 648, + 882, + 824 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 879, + 101 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 522, + 830, + 535, + 840 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[9] Matthias Bender, Tom Crecelius, Mouna Kacimi, Sebastian Michel, Thomas Neumann, Josiane Xavier Parreira, Ralf Schenkel, and Gerhard Weikum. 2008. Exploiting social relations for query expansion and result ranking. In 2008 IEEE 24th International Conference on Data Engineering Workshop. IEEE, 501-506.", + "[10] Marin Bertier, Rachid Guerraoui, Vincent Leroy, and Anne-Marie Kermarrec. 2009. Toward personalized query expansion. In Proceedings of the Second ACM EuroSys Workshop on Social Network Systems. 7-12.", + "[11] Keping Bi, Qingyao Ai, and W Bruce Croft. 2021. Learning a fine-grained review-based transformer model for personalized product search. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 123-132.", + "[12] Claudio Biancalana and Alessandro Micarelli. 2009. Social tagging in query expansion: A new way for personalized web search. In 2009 International Conference on Computational Science and Engineering, Vol. 4. IEEE, 1060-1065.", + "[13] Microsoft Bing. [n.d]. Bing Search Engine. https://www.bing.com", + "[14] Mohamed Reda Bouadjenek, Hakim Hacid, and Mokrane Bouzeghoub. 2019. Personalized social query expansion using social annotations. Transactions on Large-Scale Data-and Knowledge-Centered Systems XL (2019), 1-25.", + "[15] Mohamed Reda Bouadjenek, Hakim Hacid, Mokrane Bouzeghoub, and Johann Daigremont. 2011. Personalized social query expansion using social bookmarking systems. In Proceedings of the 34th international ACM SIGIR conference on Research and development in Information Retrieval. 1113-1114.", + "[16] Domenico Bulfamante. 2023. Generative enterprise search with extensible knowledge base using ai. Ph.D. Dissertation. Politecnico di Torino.", + "[17] Hongru Cai, Yongqi Li, Wenjie Wang, ZHU Fengbin, Xiaoyu Shen, Wenjie Li, and Tat-Seng Chua. [n. d]. Large Language Models Empowered Personalized Web Agents. In THE WEB CONFERENCE 2025.", + "[18] Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. 2024. 
Socialbench: Sociality evaluation of role-playing conversational agents. arXiv preprint arXiv:2403.13679 (2024).", + "[19] Jiangjie Chen, Xintao Wang, Rui Xu, Siyu Yuan, Yikai Zhang, Wei Shi, Jian Xie, Shuang Li, Ruihan Yang, Tinghui Zhu, et al. 2024. From persona to personalization: A survey on role-playing language agents. arXiv preprint arXiv:2404.18231 (2024).", + "[20] Ruizhe Chen, Xiaotian Zhang, Meng Luo, Wenhao Chai, and Zuozhu Liu. 2024. Pad: Personalized alignment of llms at decoding-time. arXiv preprint arXiv:2410.04070 (2024).", + "[21] Weijie Chen, Ting Bai, Jinbo Su, Jian Luan, Wei Liu, and Chuan Shi. 2024. Kg-retriever: Efficient knowledge indexing for retrieval-augmented large language models. arXiv preprint arXiv:2412.05547 (2024).", + "[22] Zheng Chen, Ziyan Jiang, Fan Yang, Eunah Cho, Xing Fan, Xiaojiang Huang, Yanbin Lu, and Aram Galstyan. 2023. Graph meets LLM: A novel approach to collaborative filtering for robust conversational understanding. arXiv preprint arXiv:2305.14449 (2023).", + "[23] Hao Cheng, Shuo Wang, Wensheng Lu, Wei Zhang, Mingyang Zhou, Kezhong Lu, and Hao Liao. 2023. Explainable recommendation with personalized review retrieval and aspect learning. arXiv preprint arXiv:2306.12657 (2023).", + "[24] Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. 2023. Adapting language models to compress contexts. arXiv preprint arXiv:2305.14788 (2023).", + "[25] Eunah Cho, Ziyan Jiang, Jie Hao, Zheng Chen, Saurabh Gupta, Xing Fan, and Chenlei Guo. 2021. Personalized search-based query rewrite system for conversational ai. In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI. 179-188.", + "[26]Hyundong Cho, Karishma Sharma, Nicolaas Jedema, Leonardo FR Ribeiro, Alessandro Moschitti, Ravi Krishnan, and Jonathan May. 2025. TuningFree Personalized Alignment via Trial-Error-Explain In-Context Learning. arXiv preprint arXiv:2502.08972 (2025).", + "[27] Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. 2024. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203 (2024).", + "[28] Wenlong Deng, Christos Thrampoulidis, and Xiaoxiao Li. 2024. Unlocking the potential of prompt-tuning in bridging generalized and personalized federated learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 6087-6097.", + "[29] Yang Deng, Yaliang Li, Wenxuan Zhang, Bolin Ding, and Wai Lam. 2022. Toward personalized answer generation in e-commerce via multiperspective preference modeling. ACM Transactions on Information Systems (TOIS) 40, 4 (2022), 1-28.", + "[30] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. 2024. The Faiss library. (2024). arXiv:2401.08281 [cs.LG]", + "[31] ESPN. [n.d.]. ESPN Sports Statistics Dataset. Online dataset.", + "[32] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A survey on rag meeting llms: Towards retrieval-augmented large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6491-6501.", + "[33] Byron J Gao, David C Anastasiu, and Xing Jiang. 2010. Utilizing user-input contextual terms for query disambiguation. In Coling 2010: Posters. 329-337.", + "[34] Jingsheng Gao, Yixin Lian, Ziyi Zhou, Yuzhuo Fu, and Baoyuan Wang. 2023. 
LiveChat: A large-scale personalized dialogue dataset automatically constructed from live streaming. arXiv preprint arXiv:2306.08401 (2023).", + "[35] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, Haofen Wang, and Haofen Wang. 2023. Retrievalaugmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 2 (2023).", + "[36] Google. [n.d.]. Google Search. https://www.google.com" + ], + "bbox": [ + 122, + 123, + 821, + 814 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 116, + 90, + 346, + 102 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 462, + 830, + 475, + 840 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Jia-Chen Gu, Hui Liu, Zhen-Hua Ling, Quan Liu, Zhigang Chen, and Xiaodan Zhu. 2021. Partner matters! an empirical study on fusing personas for personalized response selection in retrieval-based chatbots. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 565-574.", + "[38] Jie Hao, Yang Liu, Xing Fan, Saurabh Gupta, Saleh Soltan, Rakesh Chada, Pradeep Natarajan, Chenlei Guo, and Gokhan Tur. 2022. CGF: Constrained generation framework for query rewriting in conversational AI. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track. 475-483.", + "[39] Nicola Henze, Peter Dolog, and Wolfgang Nejdl. 2004. Reasoning and ontologies for personalized e-learning in the semantic web. Journal of Educational Technology & Society 7, 4 (2004), 82-97.", + "[40] Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. 2023. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 3, 4 (2023), 6.", + "[41] WANG Hongru, Minda Hu, Yang Deng, Rui Wang, Fei Mi, Weichao Wang, Yasheng Wang, Wai-Chung Kwan, Irwin King, and Kam-Fai Wong. [n. d]. Large Language Models as Source Planner for Personalized Knowledge-grounded Dialogues. In The 2023 Conference on Empirical Methods in Natural Language Processing.", + "[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. 2022. Lora: Low-rank adaptation of large language models. ICLR 1, 2 (2022), 3.", + "[43] Qiushi Huang, Shuai Fu, Xubo Liu, Wenwu Wang, Tom Ko, Yu Zhang, and Lilian Tang. 2024. Learning retrieval augmentation for personalized dialogue generation. arXiv preprint arXiv:2406.18847 (2024).", + "[44] Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. 2022. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International conference on machine learning. PMLR, 9118-9147.", + "[45] Xu Huang, Weiwen Liu, Xiaolong Chen, Xingmei Wang, Hao Wang, Defu Lian, Yasheng Wang, Ruiming Tang, and Enhong Chen. 2024. Understanding the planning of LLM agents: A survey. arXiv preprint arXiv:2402.02716 (2024).", + "[46] Rolf Jagerman, Honglei Zhuang, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2023. Query expansion by prompting large language models. 
arXiv preprint arXiv:2305.03653 (2023).", + "[47] Joel Jang, Seungone Kim, Bill Yuchen Lin, Yizhong Wang, Jack Hessel, Luke Zettlemoyer, Hannaneh Hajishirzi, Yejin Choi, and Prithviraj Ammanabrolu. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023).", + "[48] Pengyue Jia, Yiding Liu, Xiangyu Zhao, Xiaopeng Li, Changying Hao, Shuaiqiang Wang, and Dawei Yin. 2024. MILL: Mutual Verification with Large Language Models for Zero-Shot Query Expansion. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 2498-2518.", + "[49] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2023. Evaluating and inducing personality in pre-trained language models. Advances in Neural Information Processing Systems 36 (2023), 10622-10643.", + "[50] Hideaki Joko, Shubham Chatterjee, Andrew Ramsay, Arjen P De Vries, Jeff Dalton, and Faegheh Hasibi. 2024. Doing personal laps: Llm-augmented dialogue construction for personalized multi-session conversational search. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 796-806.", + "[51] Wang-Cheng Kang, Jianmo Ni, Nikhil Mehta, Maheswaran Sathiamoorthy, Lichan Hong, Ed Chi, and Derek Zhiyuan Cheng. 2023. Do llms understand user preferences? evaluating llms on user rating prediction. arXiv preprint arXiv:2305.06474 (2023).", + "[52] Manojkumar Rangasamy Kannadasan and Grigor Aslanyan. 2019. Personalized query auto-completion through a lightweight representation of the user context. arXiv preprint arXiv:1905.01386 (2019).", + "[53] Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Greg Corrado, Laszlo Lukacs, Marina Ganea, Peter Young, and Vivek Ramavajjala. 2016. Smart Reply: Automated Response Suggestion for Email. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (San Francisco, California, USA) (KDD '16). Association for Computing Machinery, New York, NY, USA, 955-964. https://doi.org/10.1145/2939672.2939801", + "[54] Mandar Kulkarni, Praveen Tangarajan, Kyung Kim, and Anusua Trivedi. 2024. Reinforcement learning for optimizing rag for domain chatbots. arXiv preprint arXiv:2401.06800 (2024).", + "[55] Ishita Kumar, Snigdha Viswanathan, Sushrita Yerra, Alireza Salemi, Ryan A Rossi, Franck Dernoncourt, Hanieh Deilamsalehy, Xiang Chen, Ruiyi Zhang, Shubham Agarwal, et al. 2024. Longlamp: A benchmark for personalized long-form text generation. arXiv preprint arXiv:2407.11016 (2024).", + "[56] Brenden Lake and Marco Baroni. 2018. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In International conference on machine learning. PMLR, 2873-2882.", + "[57] Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, and Michael Bendersky. 2024. Learning to rewrite prompts for personalized text generation. In Proceedings of the ACM Web Conference 2024. 3367-3378.", + "[58] Changhao Li, Yuchen Zhuang, Rushi Qiang, Haotian Sun, Hanjun Dai, Chao Zhang, and Bo Dai. 2024. Matryoshka: Learning to Drive Black-Box LLMs with LLMs. arXiv preprint arXiv:2410.20749 (2024).", + "[59] Lei Li, Yongfeng Zhang, and Li Chen. 2023. Personalized prompt learning for explainable recommendation. 
ACM Transactions on Information Systems 41, 4 (2023), 1-26.", + "[60] Sen Li, Fuyu Lv, Taiwei Jin, Guiyang Li, Yukun Zheng, Tao Zhuang, Qingwen Liu, Xiaoyi Zeng, James Kwok, and Qianli Ma. 2022. Query rewriting in taobao search. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3262-3271." + ], + "bbox": [ + 181, + 123, + 880, + 827 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 522, + 830, + 534, + 840 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[61] Xiaopeng Li, Lixin Su, Pengyue Jia, Xiangyu Zhao, Suqi Cheng, Junfeng Wang, and Dawei Yin. 2023. Agent4ranking: Semantic robust ranking via personalized query rewriting using multi-agent llm. arXiv preprint arXiv:2312.15450 (2023).", + "[62] Xinyu Li, Ruiyang Zhou, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024).", + "[63] Yuanchun Li, Hao Wen, Weijun Wang, Xiangyu Li, Yizhen Yuan, Guohong Liu, Jiacheng Liu, Wenxing Xu, Xiang Wang, Yi Sun, et al. 2024. Personal llm agents: Insights and survey about the capability, efficiency and security. arXiv preprint arXiv:2401.05459 (2024).", + "[64] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. 2023. Towards general text embeddings with multi-stage contrastive learning. arXiv preprint arXiv:2308.03281 (2023).", + "[65] Ruixue Lian, Sixing Lu, Clint Solomon, Gustavo Aguilar, Pragaash Ponnusamy, Jialong Han, Chengyuan Ma, and Chenlei Guo. 2023. PersonalTM: Transformer memory for personalized retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2256-2260.", + "[66] Shan-Mu Lin and Chuen-Min Huang. 2006. Personalized optimal search in local query expansion. In Proceedings of the 18th Conference on Computational Linguistics and Speech Processing. 221-236.", + "[67] Junling Liu, Chao Liu, Peilin Zhou, Renjie Lv, Kang Zhou, and Yan Zhang. 2023. Is chatgpt a good recommender? a preliminary study. arXiv preprint arXiv:2304.10149 (2023).", + "[68] Jiahong Liu, Zexuan Qiu, Zhongyang Li, Quanyu Dai, Jieming Zhu, Minda Hu, Menglin Yang, and Irwin King. 2025. A Survey of Personalized Large Language Models: Progress and Future Directions. arXiv preprint arXiv:2502.11528 (2025).", + "[69] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. 2024. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics 12 (2024), 157-173.", + "[70] Qijiong Liu, Nuo Chen, Tetsuya Sakai, and Xiao-Ming Wu. 2024. Once: Boosting content-based recommendation with both open-and closed-source large language models. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 452-461.", + "[71] Shuai Liu, Hyundong J Cho, Marjorie Freedman, Xuezhe Ma, and Jonathan May. 2023. RECAP: retrieval-enhanced context-aware prefix encoder for personalized dialogue response generation. arXiv preprint arXiv:2306.07206 (2023).", + "[72] Tyler Lu and Craig Boutilier. 2011. 
Budgeted social choice: From consensus to personalized decision making. In *IJCAI*, Vol. 11, 280-286.", + "[73] Zhengyi Ma, Zhicheng Dou, Yutao Zhu, Hanxun Zhong, and Ji-Rong Wen. 2021. One chatbot per person: Creating personalized chatbots based on implicit user profiles. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 555-564.", + "[74] Aman Madaan, Niket Tandon, Peter Clark, and Yiming Yang. 2022. Memory-assisted prompt editing to improve GPT-3 after deployment. arXiv preprint arXiv:2201.06009 (2022).", + "[75] Shengyu Mao, Xiaohan Wang, Mengru Wang, Yong Jiang, Pengjun Xie, Fei Huang, and Ningyu Zhang. 2024. Editing Personality for Large Language Models. In CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 241-254.", + "[76] Yuren Mao, Xuemei Dong, Wenyi Xu, Yunjun Gao, Bin Wei, and Ying Zhang. 2024. Fit-rag: black-box rag with factual information and token reduction. arXiv preprint arXiv:2403.14374 (2024).", + "[77] Puneet Mathur, Zhe Liu, Ke Li, Yingyi Ma, Gil Keren, Zeeshan Ahmed, Dinesh Manocha, and Xuedong Zhang. 2023. Personal: Language model personalization via domain-distributed span aggregated k-nearest n-gram retrieval augmentation. In Findings of the Association for Computational Linguistics: EMNLP 2023. 11314-11328.", + "[78] Fatemehsadat Mireshghallah, Vaishnavi Shrivastava, Milad Shokouhi, Taylor Berg-Kirkpatrick, Robert Sim, and Dimitrios Dimitriadis. 2021. Identifier: Implicit user representations for simple and effective personalized sentiment analysis. arXiv preprint arXiv:2110.00135 (2021).", + "[79] Philippe Mulhem, Nawal Ould Amer, and Mathias Gery. 2016. Axiomatic term-based personalized query expansion using bookmarking system. In International Conference on Database and Expert Systems Applications. Springer, 235-243.", + "[80] Sheshera Mysore, Zhuoran Lu, Mengting Wan, Longqi Yang, Steve Menezes, Tina Baghaee, Emmanuel Barajas Gonzalez, Jennifer Neville, and Tara Safavi. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023).", + "[81] Dang Nguyen, Jian Chen, Yu Wang, Gang Wu, Namyong Park, Zhengmian Hu, Hanjia Lyu, Junda Wu, Ryan Aponte, Yu Xia, et al. 2024. Gui agents: A survey. arXiv preprint arXiv:2412.13501 (2024).", + "[82] Duy A Nguyen, Rishi Kesav Mohan, Van Yang, Pritom Saha Akash, and Kevin Chen-Chuan Chang. 2025. RL-based Query Rewriting with Distilled LLM for online E-Commerce Systems. arXiv preprint arXiv:2501.18056 (2025).", + "[83] Jianmo Ni, Jiacheng Li, and Julian McAuley. 2019. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP). 188-197.", + "[84] Lin Ning, Luyang Liu, Jiaxing Wu, Neo Wu, Devora Berlowitz, Sushant Prakash, Bradley Green, Shawn O'Banion, and Jun Xie. 2024. User-llm: Efficient llm contextualization with user embeddings. arXiv preprint arXiv:2402.13598 (2024).", + "[85] Douglas Oard, William Webber, David Kirsch, and Sergey Golitsynski. 2015. Avocado research email collection. Philadelphia: Linguistic Data Consortium (2015).", + "[86] U.S. National Library of Medicine. [n.d.]. PubMed: A Free Resource for Biomedical Literature. 
https://pubmed.ncbi.nlm.nih.gov/", + "[87] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22." + ], + "bbox": [ + 122, + 123, + 821, + 827 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 116, + 90, + 346, + 102 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[88] Greg Pass, Abdur Chowdhury, and Cayley Torgeson. 2006. A picture of search. In Proceedings of the 1st International Conference on Scalable Information Systems (Hong Kong) (InfoScale '06). Association for Computing Machinery, New York, NY, USA, 1-es. https://doi.org/10.1145/1146847.1146848", + "[89] Vadim Igorevich Pavliukevich, Alina Khasanovna Zherdeva, Olesya Vladimirovna Makhnytkina, and Dmitriy Viktorovich Dyrmovskiy. [n. d.]. Improving RAG with LoRA finetuning for persona text generation. ([n. d.]).", + "[90] Dan Peng, Zhihui Fu, and Jun Wang. 2024. Pocketllm: Enabling on-device fine-tuning for personalized llms. arXiv preprint arXiv:2407.01031 (2024).", + "[91] Qiyao Peng, Hongtao Liu, Hongyan Xu, Qing Yang, Minglai Shao, and Wenjun Wang. 2024. Review-LLM: Harnessing Large Language Models for Personalized Review Generation. arXiv:2407.07487 [cs.CL] https://arxiv.org/abs/2407.07487", + "[92] Hongjin Qian, Zhicheng Dou, Yutao Zhu, Yueyuan Ma, and Ji-Rong Wen. 2021. Learning implicit user profile for personalized retrieval-based chatbot. In proceedings of the 30th ACM international conference on Information & Knowledge Management. 1467-1477.", + "[93] Hongjin Qian, Xiahe Li, Hanxun Zhong, Yu Guo, Yueyuan Ma, Yutao Zhu, Zhanliang Liu, Zhicheng Dou, and Ji-Rong Wen. 2021. Pchatbot: a large-scale dataset for personalized chatbot. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 2470-2477.", + "[94] Xiaoru Qu, Yifan Wang, Zhao Li, and Jun Gao. 2024. Graph-enhanced prompt learning for personalized review generation. Data Science and Engineering 9, 3 (2024), 309-324.", + "[95] A. Rajaraman and J.D. Ullman. 2011. Mining of Massive Datasets. Cambridge University Press. https://books.google.co.uk/books?id=OefRhZyYOb0C", + "[96] Yiting Ran, Xintao Wang, Rui Xu, Xinfeng Yuan, Jiaqing Liang, Deqing Yang, and Yanghua Xiao. 2024. Capturing minds, not just words: Enhancing role-playing language models with personality-indicative data. arXiv preprint arXiv:2406.18921 (2024).", + "[97] Chandan K. Reddy, Lluis Marquez, Fran Valero, Nikhil Rao, Hugo Zaragoza, Sambaran Bandyopadhyay, Arnab Biswas, Anlu Xing, and Karthik Subbian. 2022. Shopping Queries Dataset: A Large-Scale ESCI Benchmark for Improving Product Search. (2022). arXiv:2206.06588", + "[98] Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019).", + "[99] Ruiyang Ren, Peng Qiu, Yingqi Qu, Jing Liu, Wayne Xin Zhao, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2024. 
Bases: Large-scale web search user simulation with large language model based agents. arXiv preprint arXiv:2402.17505 (2024).", + "[100] Matthew Renze and Erhan Guven. 2024. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682 (2024).", + "[101] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023).", + "[102] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends in Information Retrieval 3, 4 (2009), 333-389.", + "[103] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762.", + "[104] Alireza Salemi, Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, Tao Chen, Zhuowan Li, Michael Bendersky, and Hamed Zamani. 2025. Reasoning-Enhanced Self-Training for Long-Form Personalized Text Generation. arXiv preprint arXiv:2501.04167 (2025).", + "[105] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2024. LaMP: When Large Language Models Meet Personalization. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 7370-7392.", + "[106] Alireza Salemi and Hamed Zamani. 2024. Learning to Rank for Multiple Retrieval-Augmented Models through Iterative Utility Maximization. arXiv preprint arXiv:2410.09942 (2024).", + "[107] Shibani Santurkar, Esin Durmus, Faisal Ladhak, Cinoo Lee, Percy Liang, and Tatsunori Hashimoto. 2023. Whose opinions do language models reflect?. In International Conference on Machine Learning. PMLR, 29971-30004.", + "[108] Rossano Schifanella, Alain Barrat, Ciro Cattuto, Benjamin Markines, and Filippo Menczer. 2010. Folks in folksonomies: social link prediction from shared metadata. In Proceedings of the third ACM international conference on Web search and data mining. 271-280.", + "[109] Noor Shaker, Georgios Yannakakis, and Julian Togelius. 2010. Towards automatic personalized content generation for platform games. In Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment, Vol. 6. 63-68.", + "[110] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-llm: A trainable agent for role-playing. arXiv preprint arXiv:2310.10158 (2023).", + "[111] Jocelyn Shen, Joel Mire, Hae Won Park, Cynthia Breazeal, and Maarten Sap. 2024. HEART-felt Narratives: Tracing Empathy and Narrative Style in Personal Stories with LLMs. arXiv preprint arXiv:2405.17633 (2024).", + "[112] Yunxiao Shi, Xing Zi, Zijing Shi, Haimin Zhang, Qiang Wu, and Min Xu. 2024. Eragent: Enhancing retrieval-augmented language models with improved accuracy, efficiency, and personalization. arXiv preprint arXiv:2405.06683 (2024).", + "[113] Aditi Singh, Abul Ehtesham, Saket Kumar, and Tala Talaei Khoei. 2025. Agentic Retrieval-Augmented Generation: A Survey on Agentic RAG. arXiv preprint arXiv:2501.09136 (2025).", + "[114] Harmanpreet Singh, Nikhil Verma, Yixiao Wang, Manasa Bharadwaj, Homa Fashandi, Kevin Ferreira, and Chul Lee. 2024. Personal Large Language Model Agents: A Case Study on Tailored Travel Planning. 
In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 486-514.", + "[115] Shamane Siriwardhana, Rivindu Weerasekera, Elliott Wen, Tharindu Kaluarachchi, Rajib Rana, and Suranga Nanayakkara. 2023. Improving the domain adaptation of retrieval augmented generation (RAG) models for open domain question answering. Transactions of the Association for Computational Linguistics 11 (2023), 1-17.", + "[116] Mingyang Song and Mao Zheng. 2024. A Survey of Query Optimization in Large Language Models. arXiv preprint arXiv:2412.17558 (2024)." + ], + "bbox": [ + 179, + 123, + 880, + 827 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 522, + 830, + 535, + 839 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[117] Spotify. 2023. Annoy: Approximate Nearest Neighbors in C++/Python. https://github.com/spotify/annoy", + "[118] Stuck_In_the Matrix. 2015. Reddit Public Comments (2007-10 through 2015-05). (2015). https://www.reddit.com/r/datasets/comments/3bxlg7/i_have EVERY_publicly-available Reddit_COMMENT/", + "[119] Lei Sun, Jinming Zhao, and Qin Jin. 2024. Revealing Personality Traits: A New Benchmark Dataset for Explanable Personality Recognition on Dialogues. arXiv preprint arXiv:2409.19723 (2024).", + "[120] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized pieces: Efficient personalized large language models through collaborative efforts. arXiv preprint arXiv:2406.10471 (2024).", + "[121] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing large language models via personalized parameter-efficient fine-tuning. arXiv preprint arXiv:2402.04401 (2024).", + "[122] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2025. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401", + "[123] Quan Tu, Shilong Fan, Zihang Tian, and Rui Yan. 2024. Charactereval: A Chinese benchmark for role-playing conversational agent evaluation. arXiv preprint arXiv:2401.01275 (2024).", + "[124] Cornell University. [n.d.]. arXiv: An Open Access Repository for Research. https://arxiv.org/", + "[125] Hemanth Vemuri, Sheshansh Agrawal, Shivam Mittal, Deepak Saini, Akshay Soni, Abhinav V Sambasivan, Wenhao Lu, Yajun Wang, Mehul Parsana, Purushottam Kar, et al. 2023. Personalized retrieval over millions of items. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1014-1022.", + "[126] Bryan Wang, Gang Li, and Yang Li. 2023. Enabling conversational interaction with mobile ui using large language models. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-17.", + "[127] Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291 (2023).", + "[128] Hongru Wang, Wenyu Huang, Yang Deng, Rui Wang, Zezhong Wang, Yufei Wang, Fei Mi, Jeff Z Pan, and Kam-Fai Wong. 2024. 
Unims-rag: A unified multi-source retrieval-augmented generation for personalized dialogue systems. arXiv preprint arXiv:2401.13256 (2024).", + "[129] Hongru Wang, Rui Wang, Fei Mi, Yang Deng, Zezhong Wang, Bin Liang, Ruifeng Xu, and Kam-Fai Wong. 2023. Cue-CoT: Chain-of-thought prompting for responding to in-depth dialogue questions with LLMs. arXiv preprint arXiv:2305.11792 (2023).", + "[130] Jian Wang, Yi Cheng, Dongding Lin, Chak Tou Leong, and Wenjie Li. 2023. Target-oriented proactive dialogue systems with personalization: Problem formulation and dataset curation. arXiv preprint arXiv:2310.07397 (2023).", + "[131] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. 2024. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 6 (2024), 186345.", + "[132] Liang Wang, Nan Yang, and Furu Wei. 2023. Query2doc: Query expansion with large language models. arXiv preprint arXiv:2303.07678 (2023).", + "[133] Lei Wang, Jingsen Zhang, Hao Yang, Zhiyuan Chen, Jiakai Tang, Zeyu Zhang, Xu Chen, Yankai Lin, Ruihua Song, Wayne Xin Zhao, et al. 2023. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552 (2023).", + "[134] Xintao Wang, Yunze Xiao, Jen-tse Huang, Siyu Yuan, Rui Xu, Haoran Guo, Quan Tu, Yaying Fei, Ziang Leng, Wei Wang, et al. 2023. Incharacter: Evaluating personality fidelity in role-playing agents through psychological interviews. arXiv preprint arXiv:2310.17976 (2023).", + "[135] Yixiao Wang, Homa Fashandi, and Kevin Ferreira. 2024. Investigating the Personality Consistency in Quantized Role-Playing Dialogue Agents. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 239–255.", + "[136] Yu Wang, Yifan Gao, Xiusi Chen, Haoming Jiang, Shiyang Li, Jingfeng Yang, Qingyu Yin, Zheng Li, Xian Li, Bing Yin, et al. [n.d.]. MEMORYLLM: Towards Self-Updatable Large Language Models. In Forty-first International Conference on Machine Learning.", + "[137] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 4891-4906.", + "[138] Zijie J Wang and Duen Horng Chau. 2024. MeMemo: On-device Retrieval Augmentation for Private and Personalized Text Generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2765-2770.", + "[139] Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. 2023. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746 (2023).", + "[140] Tianxin Wei, Bowen Jin, Ruirui Li, Hansi Zeng, Zhengyang Wang, Jianhui Sun, Qingyu Yin, Hanqing Lu, Suhang Wang, Jingrui He, et al. 2024. Towards unified multi-modal personalization: Large vision-language models for generative recommendation and beyond. arXiv preprint arXiv:2403.10667 (2024).", + "[141] Robert Wetzker, Carsten Zimmermann, and Christian Bauchage. 2008. Analyzing social bookmarking systems: A del. icio. us cookbook. In Proceedings of the ECAI 2008 Mining Social Data Workshop. 26-30.", + "[142] Stanisław Wozniak, Bartlomiej Koptyra, Arkadiusz Janz, Przemysław Kazienko, and Jan Kocón. 2024. 
Personalized large language models. arXiv preprint arXiv:2402.09269 (2024).", + "[143] Junde Wu, Jiayuan Zhu, Yunli Qi, Jingkun Chen, Min Xu, Filippo Menolascina, and Vicente Grau. 2024. Medical graph rag: Towards safe medical large language model via graph retrieval-augmented generation. arXiv preprint arXiv:2408.04187 (2024).", + "[144] Xuan Wu, Dong Zhou, Yu Xu, and Seamus Lawless. 2017. Personalized query expansion utilizing multi-relational social data. In 2017 12th International Workshop on Semantic and Social Media Adaptation and Personalization (SMAP). IEEE, 65-70." + ], + "bbox": [ + 117, + 123, + 820, + 825 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 114, + 90, + 348, + 102 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[145] Yunjia Xi, Weiwen Liu, Jianghao Lin, Xiaoling Cai, Hong Zhu, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, and Yong Yu. 2024. Towards open-world recommendation with knowledge augmentation from large language models. In Proceedings of the 18th ACM Conference on Recommender Systems. 12-22.", + "[146] Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, et al. 2025. The rise and potential of large language model based agents: A survey. Science China Information Sciences 68, 2 (2025), 121101.", + "[147] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th international ACM SIGIR conference on research and development in information retrieval. 641-649.", + "[148] Huatao Xu, Liying Han, Qirui Yang, Mo Li, and Mani Srivastava. 2024. Penetrative ai: Making llms comprehend the physical world. In Proceedings of the 25th International Workshop on Mobile Computing Systems and Applications. 1-7.", + "[149] Hongyan Xu, Hongtao Liu, Pengfei Jiao, and Wenjun Wang. 2021. Transformer reasoning network for personalized review summarization. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1452-1461.", + "[150] Xinchao Xu, Zhibin Gou, Wenquan Wu, Zheng-Yu Niu, Hua Wu, Haifeng Wang, and Shihang Wang. 2022. Long time no see! open-domain conversation with long-term persona memory. arXiv preprint arXiv:2203.05797 (2022).", + "[151] Yiyan Xu, Jinghao Zhang, Alireza Salemi, Xinting Hu, Wenjie Wang, Fuli Feng, Hamed Zamani, Xiangnan He, and Tat-Seng Chua. 2025. Personalized Generation In Large Model Era: A Survey. arXiv preprint arXiv:2503.02614 (2025).", + "[152] Hao Yu, Xin Yang, Xin Gao, Yan Kang, Hao Wang, Junbo Zhang, and Tianrui Li. 2024. Personalized federated continual learning via multi-granularity prompt. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4023-4034.", + "[153] Xiaoyan Yu, Tongxu Luo, Yifan Wei, Fangyu Lei, Yiming Huang, Hao Peng, and Liehuang Zhu. 2024. Neeko: Leveraging dynamic lora for efficient multi-character role-playing agent. 
arXiv preprint arXiv:2402.13717 (2024).", + "[154] Xinfeng Yuan, Siyu Yuan, Yuhan Cui, Tianhe Lin, Xintao Wang, Rui Xu, Jiangjie Chen, and Deqing Yang. 2024. Evaluating character understanding of large language models via character profiling from fictional works. arXiv preprint arXiv:2404.12726 (2024).", + "[155] Hansi Zeng, Surya Kallumadi, Zaid Alibadi, Rodrigo Nogueira, and Hamed Zamani. 2023. A personalized dense retrieval framework for unified information access. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 121-130.", + "[156] Saber Zerhoudi and Michael Granitzer. 2024. PersonaRAG: Enhancing Retrieval-Augmented Generation Systems with User-Centric Agents. arXiv preprint arXiv:2407.09394 (2024).", + "[157] Han Zhang, Songlin Wang, Kang Zhang, Zhiling Tang, Yunjiang Jiang, Yun Xiao, Weipeng Yan, and Wen-Yun Yang. 2020. Towards personalized and semantic retrieval: An end-to-end solution for e-commerce search via embedding learning. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 2407-2416.", + "[158] Jiarui Zhang. 2024. Guided profile generation improves personalization with llms. arXiv preprint arXiv:2409.13093 (2024).", + "[159] Jesse Zhang, Jiahui Zhang, Karl Pertsch, Ziyi Liu, Xiang Ren, Minsuk Chang, Shao-Hua Sun, and Joseph J Lim. [n.d.]. Bootstrap Your Own Skills: Learning to Solve New Tasks with Large Language Model Guidance. In 7th Annual Conference on Robot Learning.", + "[160] Kai Zhang, Yangyang Kang, Fubang Zhao, and Xiaozhong Liu. 2023. LLM-based medical assistant personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023).", + "[161] Kaiyan Zhang, Jianyu Wang, Ermo Hua, Biqing Qi, Ning Ding, and Bowen Zhou. 2024. Cogenesis: A framework collaborating large and small language models for secure context-aware instruction following. arXiv preprint arXiv:2403.03129 (2024).", + "[162] Kai Zhang, Fubang Zhao, Yangyang Kang, and Xiaozhong Liu. 2023. Memory-augmented llm personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023).", + "[163] Wenlin Zhang, Chuhan Wu, Xiangyang Li, Yuhao Wang, Kuicai Dong, Yichao Wang, Xinyi Dai, Xiangyu Zhao, Huifeng Guo, and Ruiming Tang. 2025. LLMTreeRec: Unleashing the Power of Large Language Models for Cold-Start Recommendations. In Proceedings of the 31st International Conference on Computational Linguistics. 886-896.", + "[164] Yanyue Zhang, Yulan He, and Deyu Zhou. 2025. Rehearse With User: Personalized Opinion Summarization via Role-Playing based on Large Language Models. arXiv preprint arXiv:2503.00449 (2025).", + "[165] You Zhang, Jin Wang, Liang-Chih Yu, Dan Xu, and Xuejie Zhang. 2024. Personalized LoRA for human-centered text understanding. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 1958-19596.", + "[166] Yabin Zhang, Wenhui Yu, Erhan Zhang, Xu Chen, Lantao Hu, Peng Jiang, and Kun Gai. 2024. Recgpt: Generative personalized prompts for sequential recommendation via chatgpt training paradigm. arXiv preprint arXiv:2404.08675 (2024).", + "[167] Zeyu Zhang, Xiaohe Bo, Chen Ma, Rui Li, Xu Chen, Quanyu Dai, Jieming Zhu, Zhenhua Dong, and Ji-Rong Wen. 2024. A survey on the memory mechanism of large language model based agents. 
arXiv preprint arXiv:2404.13501 (2024).", + "[168] Zhehao Zhang, Ryan A Rossi, Branislav Kveton, Yijia Shao, Diyi Yang, Hamed Zamani, Franck Dernoncourt, Joe Barrow, Tong Yu, Sungchul Kim, et al. 2024. Personalization of large language models: A survey. arXiv preprint arXiv:2411.00027 (2024).", + "[169] Yi Zheng, Chongyang Ma, Kanle Shi, and Haibin Huang. 2023. Agents meet okr: An object and key results driven agent system with hierarchical self-collaboration and self-evaluation. arXiv preprint arXiv:2311.16542 (2023).", + "[170] Hanxun Zhong, Zhicheng Dou, Yutao Zhu, Hongjin Qian, and Ji-Rong Wen. 2022. Less is more: Learning to refine dialogue history for personalized dialogue generation. arXiv preprint arXiv:2204.08128 (2022).", + "[171] Wanjun Zhong, Duyu Tang, Jiahai Wang, Jian Yin, and Nan Duan. 2021. UserAdapter: Few-shot user learning in sentiment analysis. In Findings of the Association for Computational Linguistics: ACL-JJCNLP 2021. 1484-1488." + ], + "bbox": [ + 176, + 123, + 880, + 825 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 176, + 90, + 459, + 102 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "X. Li and P. Jia, et al.", + "bbox": [ + 779, + 90, + 880, + 101 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 522, + 830, + 535, + 840 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[172] Dong Zhou, Séamus Lawless, and Vincent Wade. 2012. Improving search via personalized query expansion using social media. Information retrieval 15 (2012), 218-242.", + "[173] Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, et al. 2022. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625 (2022).", + "[174] Yujia Zhou, Qiannan Zhu, Jiajie Jin, and Zhicheng Dou. 2024. Cognitive personalized search integrating large language models with an efficient memory mechanism. In Proceedings of the ACM Web Conference 2024. 1464-1473.", + "[175] Yuchen Zhuang, Haotian Sun, Yue Yu, Rushi Qiang, Qifan Wang, Chao Zhang, and Bo Dai. [n.d.]. Hydra: Model factorization framework for black-box llm personalization, 2024. URL https://arxiv.org/abs/2406.02888 ([n.d.])." 
+ ], + "bbox": [ + 117, + 123, + 821, + 223 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Received 20 February 2007; revised 12 March 2009; accepted 5 June 2009", + "bbox": [ + 116, + 238, + 503, + 250 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "A Survey of Personalization: From RAG to Agent", + "bbox": [ + 116, + 90, + 346, + 102 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY", + "bbox": [ + 539, + 90, + 821, + 101 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 462, + 830, + 473, + 840 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_model.json b/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6bae7212863b8d4d15d5aeea4d579ddbf29f8b10 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_model.json @@ -0,0 +1,4650 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.273, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.10147v1 [cs.IR] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.117, + 0.551, + 0.136 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.149, + 0.495, + 0.167 + ], + "angle": 0, + "content": "XIAOPENG LI*, City University of Hong Kong, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.17, + 0.497, + 0.186 + ], + "angle": 0, + "content": "PENGYUE JIA*, City University of Hong Kong, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.19, + 0.822, + 0.207 + ], + "angle": 0, + "content": "DERONG XU, City University of Hong Kong, Hong Kong and University of Science and Technology of China, China" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.211, + 0.445, + 0.227 + ], + "angle": 0, + "content": "YI WEN, City University of Hong Kong, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.232, + 0.773, + 0.248 + ], + "angle": 0, + "content": "YINGYI ZHANG, City University of Hong Kong, Hong Kong and Dalian University of Technology, China" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.252, + 0.516, + 0.269 + ], + "angle": 0, + "content": "WENLIN ZHANG, City University of Hong Kong, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.273, + 0.502, + 0.289 + ], + "angle": 0, + "content": "WANYU WANG, City University of Hong Kong, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.293, + 0.438, + 0.308 + ], + "angle": 0, + "content": "YICHAO WANG, Noah's Ark Lab, Huawei, China" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.314, + 0.456, + 0.329 + ], + "angle": 0, + "content": "ZHAOCHENG DU, Noah's Ark Lab, Huawei, China" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.334, + 0.435, + 0.349 + ], + "angle": 0, + "content": "XIANGYANG LI, Noah's Ark Lab, Huawei, China" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.355, + 0.422, + 0.37 + ], + "angle": 0, + "content": "YONG LIU, Noah's Ark Lab, Huawei, Singapore" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.375, + 0.436, + 0.39 + ], + "angle": 0, + "content": "HUIFENG GUO, Noah's Ark Lab, Huawei, China" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.394, + 0.451, + 0.41 + ], + "angle": 0, + "content": "RUIMING TANG†, Noah's Ark Lab, Huawei, China" + }, + { + 
"type": "text", + "bbox": [ + 0.116, + 0.416, + 0.519, + 0.432 + ], + "angle": 0, + "content": "XIANGYU ZHAO†, City University of Hong Kong, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.443, + 0.823, + 0.598 + ], + "angle": 0, + "content": "Personalization has become an essential capability in modern AI systems, enabling customized interactions that align with individual user preferences, contexts, and goals. Recent research has increasingly concentrated on Retrieval-Augmented Generation (RAG) frameworks and their evolution into more advanced agent-based architectures within personalized settings to enhance user satisfaction. Building on this foundation, this survey systematically examines personalization across the three core stages of RAG: pre-retrieval, retrieval, and generation. Beyond RAG, we further extend its capabilities into the realm of Personalized LLM-based Agents, which enhance traditional RAG systems with agentic functionalities, including user understanding, personalized planning and execution, and dynamic generation. For both personalization in RAG and agent-based personalization, we provide formal definitions, conduct a comprehensive review of recent literature, and summarize key datasets and evaluation metrics. Additionally, we discuss fundamental challenges, limitations, and promising research directions in this evolving field. Relevant papers and resources are continuously updated at the Github Repo1." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.614, + 0.449, + 0.627 + ], + "angle": 0, + "content": "CCS Concepts: \\(\\cdot\\) Information systems \\(\\rightarrow\\) Personalization." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.643, + 0.742, + 0.657 + ], + "angle": 0, + "content": "Additional Key Words and Phrases: Large Language Model, Retrieval-Augmented Generation, Agent, Personalization" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.679, + 0.536, + 0.691 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/Applied-Machine-Learning-Lab/Awesome-Personalized-RAG-Agent" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.691, + 0.217, + 0.702 + ], + "angle": 0, + "content": "* Equal contribution." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.702, + 0.241, + 0.713 + ], + "angle": 0, + "content": "† Corresponding authors." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.679, + 0.536, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.737, + 0.823, + 0.787 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.789, + 0.507, + 0.8 + ], + "angle": 0, + "content": "© 2018 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.802, + 0.263, + 0.813 + ], + "angle": 0, + "content": "Manuscript submitted to ACM" + }, + { + "type": "page_number", + "bbox": [ + 0.465, + 0.832, + 0.472, + 0.841 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.882, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.123, + 0.324, + 0.135 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.138, + 0.885, + 0.199 + ], + "angle": 0, + "content": "Xiaopeng Li*, Pengyue Jia*, Derong Xu, Yi Wen, Yingyi Zhang, Wenlin Zhang, Wanyu Wang, Yichao Wang, Zhaocheng Du, Xiangyang Li, Yong Liu, Huifeng Guo, Ruiming Tang†, and Xiangyu Zhao†. 2018. A Survey of Personalization: From RAG to Agent. In Proceedings of Make sure to enter the correct conference title from your rights confirmation email (Conference acronym 'XX). ACM, New York, NY, USA, 25 pages. https://doi.org/XXXXXXXX.XXXXXXXXXX" + }, + { + "type": "title", + "bbox": [ + 0.177, + 0.217, + 0.32, + 0.23 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.239, + 0.885, + 0.34 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have revolutionized AI-driven applications by enabling natural language understanding and generation at an unprecedented scale. However, these models often suffer from issues such as outdated responses and hallucinations, which severely hinder the accuracy of information generation. Retrieval-Augmented Generation (RAG) has emerged as a promising framework that integrates retrieved information from external corpora, such as external APIs [13, 36], scientific repositories [86, 124] or domain-specific databases [4, 31], ensuring more knowledge-grounded and up-to-date outputs." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.343, + 0.885, + 0.513 + ], + "angle": 0, + "content": "Its versatility has led to significant applications across various domains, including question answering [115], enterprise search [16] and healthcare [143], etc. Among these applications, one particularly notable area is in agent workflows, where RAG enhances autonomous systems by providing context-aware, dynamically retrieved, and reliable knowledge. This is because each stage of the RAG process closely mirrors key aspects of an agent's workflow, as shown in Figure 1. For instance, the query rewriting phase in RAG, which involves semantic understanding and parsing, aligns with the semantic comprehension stage in agent workflows. Likewise, RAG's retrieval phase, which focuses on extracting the most relevant documents, corresponds to the planning and execution phases of an agent, where decisions are made based on retrieved knowledge. Finally, the generation phase in RAG parallels an agent's execution stage, where actions are performed based on the given task. This structural alignment suggests that the architecture of RAG is fundamentally converging with agent workflows, solidifying its position as a key facilitator of intelligent and autonomous systems." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.516, + 0.885, + 0.686 + ], + "angle": 0, + "content": "Although the structural alignment between RAG and agent workflows highlights their deepening convergence, a critical next step in enhancing these intelligent systems lies in personalization. Personalization is a key driver toward achieving more adaptive and context-aware AI, which is fundamental for the progression toward Artificial General Intelligence (AGI). It plays an essential role in applications such as personalized reasoning [39, 149], adaptive decision-making [72], user-specific content generation [109, 151], and interactive AI systems [73, 92]. However, existing research lacks a comprehensive comparative analysis of personalized RAG and agentic approaches. Current surveys primarily focus on general RAG methodologies [32, 35] or agent-related literature [63, 131, 167], without systematically exploring their implications for personalization. While recent works such as [68, 168] discuss personalization, they predominantly address personalized generation within LLMs or specific downstream tasks, overlooking how personalization can be effectively integrated into RAG and agent workflows." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.688, + 0.885, + 0.738 + ], + "angle": 0, + "content": "Motivated by the above issues, this survey aims to provide a comprehensive review of the integration of personalization into RAG and agentic RAG frameworks to enhance user experiences and optimize satisfaction. The key contributions of this work can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.745, + 0.88, + 0.792 + ], + "angle": 0, + "content": "- We provide an extensive exploration of the existing literature on how personalization is integrated into various stages of RAG (pre-retrieval, retrieval, and generation) and agentic RAG (understanding, planning, execution, and generation)." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.796, + 0.88, + 0.827 + ], + "angle": 0, + "content": "- We summarize the key datasets, benchmarks, and evaluation metrics used in existing research for each subtask to facilitate future studies in the respective domains." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.745, + 0.88, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.525, + 0.832, + 0.534, + 0.84 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.124, + 0.681, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.47, + 0.658, + 0.484 + ], + "angle": 0, + "content": "Fig. 1. Correlation between personalization and RAG with agent flow." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.518, + 0.82, + 0.55 + ], + "angle": 0, + "content": "- We also highlight the limitations of current research and suggest future directions for personalized RAG, emphasizing potential advancements to address existing challenges." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.56, + 0.822, + 0.643 + ], + "angle": 0, + "content": "The outline of this survey is as follows: we introduce what is personalization (Sec. 
2) and explain how personalization is adopted into RAG pipeline (Sec. 3). Then, we present a literature review on where to integrate personalization within different stages of RAG and agentic RAG workflows (Sec. 4) and discuss the key datasets and evaluation metrics used in existing research (Sec.5). Lastly, we present a discussion on the limitations of current research and future directions (Sec. 6)." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.663, + 0.345, + 0.675 + ], + "angle": 0, + "content": "2 WHAT IS PERSONALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.684, + 0.82, + 0.75 + ], + "angle": 0, + "content": "Personalization in current research refers to the tailoring of model predictions or generated content to align with an individual's preferences. In the context of RAG and agents, personalization involves incorporating user-specific information at various stages of the RAG pipeline or within agents. User personalization can be categorized into the following types:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.76, + 0.82, + 0.791 + ], + "angle": 0, + "content": "- Explicit User Profile: Explicitly presented user information, including biographical details, attributes (e.g., age, location, gender, education), and social connections (e.g., social networks)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.795, + 0.82, + 0.825 + ], + "angle": 0, + "content": "- User Historical Interactions: Behavioral data, including browsing history, clicks, and purchases, which help infer user interests and preferences to improve personalization." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.76, + 0.82, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.465, + 0.832, + 0.473, + 0.841 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.88, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.39, + 0.119, + 0.668, + 0.132 + ], + "angle": 0, + "content": "Table 1. Overview of Personalized RAG and Agent." + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.139, + 0.88, + 0.501 + ], + "angle": 0, + "content": "
Field | Sub-field | Subsub-field | Papers
Pre-retrieval | Query Rewriting | Learning to Personalized Query Rewrite | CLE-QR [60], CGF [38], PEARL [80]
Pre-retrieval | Query Rewriting | LLM to Personalized Query Rewrite | Least-to-Most Prompting [173], ERAGent [112], CoPS [174], Agent4Ranking [61], FIG [22], BASES [99]
Pre-retrieval | Query Expansion | Tagging-based query expansion | Gossiple [10], Biancalana and Micarelli [12], SoQuES [15], Zhou et al. [172]
Pre-retrieval | Query Expansion | Else | Lin and Huang [66], Bender et al. [9], Axiomatic PQEC [79], WE-LM [144], PSQE [14], PQEWC [7]
Pre-retrieval | Others | - | Bobo [33], Kannadasan and Aslanyan [52], PSQE [8]
Retrieval | Indexing | - | PEARL [80], KG-Retriever [21], EMG-RAG [137], PGraphRAG [5]
Retrieval | Retrieval | Dense Retrieval | MeMemo [138], RECAP [71], LAPDOG [43], Gu et al. [37], PersonalLM [77], UIA [155], XPERT [125], DPSR [157], RTM [11], Pearl [80], MemPrompt [74], EERRA [23], MALP [160], USER-LLM [84], PER-PCS [120]
Retrieval | Retrieval | Sparse Retrieval | OPPU [121], PAG [101], Au et al. [5], UniMS-RAG [128], Deng et al. [29]
Retrieval | Retrieval | Prompt-based Retrieval | LAPS [50], UniMP [140], Shen et al. [111]
Retrieval | Retrieval | Others | Salemi et al. [103], PersonalTM [65], Zhang et al. [165]
Retrieval | Post-retrieval | - | PersonaRAG [156], Pavliukevich et al. [89], UniMS-RAG [128], Salemi and Zamani [106], Zhang et al. [164], AutoCompressors [24], FIT-RAG [76]
Generation | Generation from Explicit Preferences | Direct Prompting | P² [49], Character Profiling [154], OpinionQA [107], Kang et al. [51], Liu et al. [67], Cue-CoT [129], TICL [26]
Generation | Generation from Explicit Preferences | Profile-Augmented Prompting | GPG [158], Richardson et al. [101], ONCE [70], LLMTreeRec [163], KAR [145], Matryoshka [58]
Generation | Generation from Explicit Preferences | Personalized-Prompt Prompting | Li et al. [57], RecGPT [166], PEPLER-D [59], GRAPA [94], SGPT [28], PFCL [152]
Generation | Generation from Implicit Preferences | Fine-tuning-Based Methods | PLoRA [165], LM-P [142], MiLP [165], OPPU [122], PER-PCS [120], Review-LLM [91], UserIdentifier [78], UserAdapter [171], HYDRA [175], PocketLLM [90], CoGenesis [161]
Generation | Generation from Implicit Preferences | Reinforcement Learning-Based Methods | P-RLHF [62], P-SOUPS [47], PAD [20], REST-PG [104], Salemi et al. [103], RewrimerSIRI [57], Kulkarni et al. [54]
From RAG to Agent | Personalized Understanding | In user-profile understanding | Xu et al. [148], Abbasian et al. [2]
From RAG to Agent | Personalized Understanding | In agent's role understanding | RoleLLM [139], Character-LLM [110], Wang et al. [134]
From RAG to Agent | Personalized Understanding | In agent's user-role joint understanding | SocialBench [18], Dai et al. [27], Ran et al. [96], Wang et al. [126], Tu et al. [123], Neeko [153]
From RAG to Agent | Personalized Planning and Execution | Memory Management | EMG-RAG [137], Park et al. [87], Abbasian et al. [2], RecAgent [133], TravelPlanner+ [114], PersonalWAB [17], VOYAGER [127], MemoryLLM [136]
From RAG to Agent | Personalized Planning and Execution | Tool and API Calling | VOYAGER [127], Zhang et al. [159], PUMA [17], Wang et al. [126], PenetrativeAI [148], Huang et al. [44], [87], MetaGPT [40], OKR-Agent [169]
From RAG to Agent | Personalized Generation | Alignment with User Fact | Character-LLM [110], Wang et al. [135], Dai et al. [27]
From RAG to Agent | Personalized Generation | Alignment with User Preferences | Wang et al. [139], Ran et al. [96], Wang et al. [134], Chen et al. [18]
" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.531, + 0.88, + 0.56 + ], + "angle": 0, + "content": "- User Historical Content: Implicit personalization derived from user-generated content, such as chat history, emails, reviews, and social media interactions." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.565, + 0.872, + 0.579 + ], + "angle": 0, + "content": "- Persona-Based User Simulation: The use of LLM-based agents to simulate and generate personalized interactions." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.531, + 0.88, + 0.579 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.588, + 0.88, + 0.62 + ], + "angle": 0, + "content": "Integrating this personalized information at various stages of the RAG and agent workflows enables dynamic alignment with human preferences, thereby making responses more user-centric and adaptive." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.638, + 0.461, + 0.65 + ], + "angle": 0, + "content": "3 HOW TO ADOPT PERSONALIZATION" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.66, + 0.712, + 0.674 + ], + "angle": 0, + "content": "We define the process of introducing personalization within the RAG pipeline as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.413, + 0.685, + 0.881, + 0.699 + ], + "angle": 0, + "content": "\\[\ng = \\mathcal {G} (\\mathcal {R} (Q (q, p), C, p), \\text {p r o m p t}, p, \\theta) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.709, + 0.882, + 0.827 + ], + "angle": 0, + "content": "where \\( p \\) denotes personalized information, and the process unfolds in three steps. In the pre-retrieval phase, query processing \\( (Q) \\) refines the query \\( q \\) using personalized information, such as through query rewriting or expansion. During the retrieval phase, the retriever \\( (\\mathcal{R}) \\) leverages \\( p \\) to fetch relevant documents from the corpus \\( (C) \\). Finally, in the generation phase, the retrieved information, combined with \\( p \\) and structured using the given prompt, id fed into the generator \\( (\\mathcal{G}) \\) with parameter \\( \\theta \\) to produce the final response \\( g \\). It is evident that personalized information directly influences multiple stages of the RAG pipeline. In this survey, we consider the agent system as a specialized application of the RAG framework, where personalization is incorporated in a manner similar to the RAG framework." + }, + { + "type": "page_number", + "bbox": [ + 0.525, + 0.832, + 0.534, + 0.84 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.125, + 0.818, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.316, + 0.392, + 0.62, + 0.405 + ], + "angle": 0, + "content": "Fig. 2. Overview of the personalized pre-retrieval stage." 
+ }, + { + "type": "title", + "bbox": [ + 0.115, + 0.426, + 0.416, + 0.439 + ], + "angle": 0, + "content": "4 WHERE TO ADOPT PERSONALIZATION" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.448, + 0.24, + 0.46 + ], + "angle": 0, + "content": "4.1 Pre-retrieval" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.47, + 0.823, + 0.536 + ], + "angle": 0, + "content": "4.1.1 Definition. Pre-retrieval is a crucial step in information retrieval systems, where the original user query is enhanced or modified before the retrieval process to improve the relevance and quality of the search results, as shown in Figure 2. This process often incorporates additional contextual or personalized information to better align the query with the user's intent. The process can be formalized as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.427, + 0.546, + 0.822, + 0.561 + ], + "angle": 0, + "content": "\\[\nq ^ {*} = Q (q, p) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.571, + 0.821, + 0.601 + ], + "angle": 0, + "content": "where \\( p \\) and \\( q \\) denote the personalized information and original query, and \\( q^{*} \\) is the optimized query after query reformulation." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.615, + 0.822, + 0.698 + ], + "angle": 0, + "content": "4.1.2 Query Rewriting. Query rewriting in RAG at the pre-retrieval stage refers to the process of reformulating user queries to enhance retrieval effectiveness by improving relevance, disambiguating intent, or incorporating contextual information before retrieving documents from an external knowledge source. The literature on personalized query rewriting can be broadly classified into two primary categories: (1) Direct Personalized Query Rewriting and (2) Auxiliary Personalized Query Rewriting." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.71, + 0.823, + 0.828 + ], + "angle": 0, + "content": "(1). Direct Personalized Query Rewriting. The first category focuses on personalized query rewriting by using direct models. For example, Cho et al. [25] presents a personalized search-based query rewrite system for conversational AI that addresses user-specific semantic and phonetic errors. Nguyen et al. [82] apply reinforcement learning techniques to improve query rewriting in online e-commerce systems, leveraging distilled LLMs for personalized performance. CLE-QR [60] explores query rewriting in Taobao's search engine to enhance user satisfaction through customized query adaptation. CGF [38] introduces a constrained generation framework that allows for more flexible and personalized query rewriting in conversational AI. Li et al. [57] investigate learning methods to rewrite prompts for personalized" + }, + { + "type": "page_number", + "bbox": [ + 0.465, + 0.832, + 0.472, + 0.84 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.88, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.122, + 0.882, + 0.171 + ], + "angle": 0, + "content": "text generation, improving the relevance and engagement of AI-generated content. Additionally, PEARL [80] discusses personalizing large language model-based writing assistants through the integration of generation-calibrated retrievers, enhancing AI-generated content." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.186, + 0.885, + 0.374 + ], + "angle": 0, + "content": "(2). Auxiliary Personalized Query Rewriting. The second category emphasizes personalized query rewriting by using auxiliary mechanisms, such as retrieval, reasoning strategies, and external memory. Zhou et al. [173] propose a least-to-most prompting strategy that aids in complex reasoning within LLMs, which can be adapted for personalized text generation. ERAGent [112] enhance retrieval-augmented LLMs to improve personalization, efficiency, and accuracy, indirectly supporting personalized query rewriting for content generation. CoPS [174] integrate LLMs with memory mechanisms to create more personalized search experiences, which also influences content generation through better query understanding. Further, Agent4Ranking [61] employs multi-agent LLMs to perform semantic robust ranking, including personalized query rewriting to improve search rankings. FIG [22] combine graph-based methods with LLMs to query rewrite, improving personalized content generation and conversational interactions. Lastly, BASES [99] employ LLM-based agents to simulate large-scale web search user interactions, contributing to the development of personalized query rewriting strategies for content generation." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.389, + 0.884, + 0.492 + ], + "angle": 0, + "content": "4.1.3 Query Expansion. Query expansion enhances retrieval systems by expanding a user's original query with additional terms, synonyms, or refined structure to better capture intent. This improves the relevance and scope of retrieved documents. Recent advancements in LLMs have reinvigorated this field [46, 48, 132], leveraging their comprehension and generation abilities to expand queries using encoded knowledge or external retrieval, with notable success. Personalized query expansion, a subset, incorporates user-specific data to tailor results, boosting performance and customizing the search experience." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.506, + 0.884, + 0.658 + ], + "angle": 0, + "content": "(1). Tagging-based Query Expansion. By 2009, studies began incorporating tagging information to enhance personalized query expansion. For instance, Gossle [10] introduced the TagMap and TagRank algorithms, which dynamically selected tags from personalized networks constructed using the cosine similarity of user-item tag distances, improving recall performance. Similarly, Biancalana and Micarelli [12] recorded user queries and visited URLs, leveraging social bookmarking to extract relevant tags and build a personalized three-dimensional co-occurrence matrix. Based on this, multiple semantically categorized expanded queries were generated to better reflect user interests. Further advancements include SoQuES [15], which integrated tag semantic similarity with social proximity, and a graph-based approach [172] that utilized Tag-Topic models and pseudo-relevance feedback for term weighting, tailoring the expansion process to individual user preferences." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.673, + 0.882, + 0.828 + ], + "angle": 0, + "content": "(2). Else. Apart from tagging-based techniques, early research on Personalized Query Expansion primarily focused on modeling user personalization based on search history [66], social networks, or preferences derived from friendship networks [9]. 
The Axiomatic PQEC framework [79] formalized expansion rules using both local (user behavior-driven) and social (network-driven) strategies. In 2017, WE-LM [144] advanced this paradigm by modeling multi-relational networks with word embeddings across tag-word relationships, refining associations through affinity graphs. Later, PSQE [14] further improved tagging-based methods using utf-iuf user profiling, integrating a tag similarity graph with user profiles in the online phase to compute expansion terms relevant to user interests in real-time, achieving dynamic personalized expansion. In addition, PQEWC [7] leveraged clustering and contextual word embeddings to optimize query expansions dynamically." + }, + { + "type": "list", + "bbox": [ + 0.175, + 0.506, + 0.884, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.525, + 0.832, + 0.534, + 0.841 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.118, + 0.822, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.328, + 0.289, + 0.609, + 0.302 + ], + "angle": 0, + "content": "Fig. 3. Overview of the personalized retrieval stage." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.333, + 0.823, + 0.417 + ], + "angle": 0, + "content": "4.1.4 Others. Besides query rewriting and query expansion, other personalized query-related research focuses on areas like query disambiguation and query auto-completion [116]. Bobo [33] allows users to input contextual terms reflecting their domain knowledge. In 2019, a method [52] applied fastText embeddings from recent queries to rank candidates. In addition, PSQE [8] employed synthetic user profiles from Wikipedia and word2vec embeddings for query disambiguation." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.431, + 0.822, + 0.497 + ], + "angle": 0, + "content": "4.1.5 Discussion. While both query rewriting and query expansion aim to align user input with system understanding to enhance retrieval quality, their roles in personalization differ in fundamental ways. Understanding the distinct operational characteristics and application scenarios of each technique is essential for designing effective personalized retrieval systems. The key takeaways are listed as follows:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.507, + 0.82, + 0.539 + ], + "angle": 0, + "content": "- Query rewriting is most beneficial when the original query is ambiguous, underspecified, or misaligned with retrieval intents, particularly in conversational or multi-turn settings." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.541, + 0.82, + 0.573 + ], + "angle": 0, + "content": "- Query expansion is most effective when the original query is relevant but incomplete - i.e., when it needs to be semantically broadened to cover additional relevant concepts." 
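To ground the pre-retrieval techniques reviewed above, the sketch below implements a toy version of tagging-based personalized query expansion (Sec. 4.1.3) in Python. It is a simplified illustration under assumed data structures, a user tag profile and a global tag co-occurrence map, and is not the algorithm of any specific cited system.

```python
# Toy personalized query expansion in the spirit of the tagging-based methods of
# Sec. 4.1.3: candidate expansion terms are scored by how well their co-occurrence
# profile matches the query and how prominent they are in the user's tag profile.
# Data structures and weighting here are illustrative assumptions.
import math
from collections import Counter

def cosine(a: Counter, b: Counter) -> float:
    dot = sum(a[t] * b[t] for t in a)
    norm = math.sqrt(sum(v * v for v in a.values())) * math.sqrt(sum(v * v for v in b.values()))
    return dot / norm if norm else 0.0

def expand(query: str, user_tags: Counter, cooccurrence: dict, top_k: int = 2) -> str:
    q_vec = Counter(query.lower().split())
    scored = []
    for tag, profile in cooccurrence.items():
        if tag in q_vec:
            continue  # do not re-add terms already present in the query
        score = cosine(q_vec, profile) * (1.0 + math.log1p(user_tags[tag]))
        scored.append((score, tag))
    picked = [t for s, t in sorted(scored, reverse=True)[:top_k] if s > 0]
    return ' '.join([query] + picked)

user_tags = Counter({'python': 12, 'gardening': 1})           # user's personal tags
cooccurrence = {'python': Counter({'pandas': 4, 'code': 6}),  # global tag co-occurrence
                'gardening': Counter({'soil': 3})}
print(expand('pandas code help', user_tags, cooccurrence))    # -> 'pandas code help python'
```

The same pattern, scoring candidate expansion terms jointly by query relevance and user affinity, underlies most of the personalized expansion methods discussed above.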
+ }, + { + "type": "list", + "bbox": [ + 0.116, + 0.507, + 0.82, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.592, + 0.216, + 0.604 + ], + "angle": 0, + "content": "4.2 Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.613, + 0.82, + 0.68 + ], + "angle": 0, + "content": "4.2.1 Definition. The retrieval process involves finding the most relevant documents \\( D^{*} \\) from a corpus \\( C \\) based on a query \\( q^{*} \\), as shown in Figure 3. To incorporate personalization, additional user-specific information \\( p \\) is integrated into the retrieval function \\( \\mathcal{R} \\). This allows the retrieval process to tailor the selected documents to align with individual user preferences or contexts, thereby enhancing the relevance and personalization of the generated outputs." + }, + { + "type": "equation", + "bbox": [ + 0.415, + 0.689, + 0.822, + 0.706 + ], + "angle": 0, + "content": "\\[\nD ^ {*} = \\mathcal {R} (q ^ {*}, C, p) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.715, + 0.82, + 0.764 + ], + "angle": 0, + "content": "In the retrieval process, personalization can primarily be introduced by focusing on three steps: indexing, retrieval, and post-retrieval. These steps ensure efficient and accurate retrieval of relevant documents or knowledge, while tailoring the process to individual user preferences. Below, we provide a detailed explanation of each step." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.778, + 0.822, + 0.827 + ], + "angle": 0, + "content": "4.2.2 Indexing. Indexing organizes knowledge base data into a structured format to facilitate efficient retrieval. Within the RAG pipeline, documents are either chunked or entirely encoded into representations before being integrated into searchable systems [30, 117]. Conventional encoding methods employ either sparse encoding techniques (e.g.," + }, + { + "type": "page_number", + "bbox": [ + 0.464, + 0.832, + 0.473, + 0.84 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.88, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.122, + 0.881, + 0.154 + ], + "angle": 0, + "content": "TF-IDF [95], BM25 [102]) or dense encoding approaches leveraging pre-trained models, such as BERT [1], Siamese Encoders [98], or LLM-based encoders [64, 147]." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.157, + 0.885, + 0.274 + ], + "angle": 0, + "content": "To introduce personalization at the indexing stage, PEARL [80] generates user embeddings by encoding personal history data with models like DeBERTa. These embeddings are subsequently clustered to create personalized shared indices. Other approaches integrate knowledge graphs into indexing to enhance retrieval performance. For example, KG-R retriever [21] employs a Hierarchical Index Graph, consisting of a knowledge graph layer and a collaborative document layer, to improve RAG retrieval. EMG-RAG [137] incorporates personalized memory within an editable knowledge graph, enabling dynamic retrieval. Similarly, PGraphRAG [5] leverages user-centric knowledge graphs to enhance personalization in retrieval tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.289, + 0.884, + 0.338 + ], + "angle": 0, + "content": "4.2.3 Retrieval. The Retrieval step matches a user query with the indexed knowledge base to fetch relevant candidates. It can be broadly categorized into four different types: (1) Dense Retrieval, (2) Sparse Retrieval, (3) Prompt-based Retrieval, and (4) Others." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.352, + 0.885, + 0.539 + ], + "angle": 0, + "content": "(1). Dense Retrieval. Dense retrieval methods often use vector embeddings and similarity metrics (e.g., cosine similarity) and achieve personalization by encoding user preferences, context, or interactions into query or document embeddings, enabling tailored results through similarity-based matching. For instance, MeMemo [138] retrieves personalized information by matching user-specific embeddings with document vectors, focusing on private, on-device text generation. Similarly, RECAP [71] and LAPDOG [43] enhance personalized dialogue generation by encoding queries and user profiles as dense vectors and retrieving top-N results, ensuring user-specific context drives the responses. In chatbots, Gu et al. [37] integrates conversational context and user profiles to align retrieved responses with user personas. PersonalM [77] employs group-wise contrastive learning, training its retrieval model to align user queries with domain-specific text fragments, thereby improving personalization. UIA [155] employs dual encoders to retrieve documents tailored to user preferences. XPERT [125] incorporates temporal events and user interactions into embeddings, enabling large-scale retrieval across millions of items." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.542, + 0.887, + 0.66 + ], + "angle": 0, + "content": "Dense retrieval also enhances specific applications like e-commerce, medical assistance, and language models. DPSR [157] and RTM [11] encode user queries and product information to personalize product searches dynamically. Pearl [80] and MemPrompt [74] retrieve personalized content by leveraging historical user data and memory-assisted mechanisms. EERRA [23] uses review embeddings as dense queries for recommendations. In medical assistance, MALP [160] and User-LLM [84] integrate short- and long-term user interactions into embeddings for contextualized, personalized responses. Finally, PER-PCS [120] retrieves relevant information using individual user histories, enhancing the personalization capabilities of large language models." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.674, + 0.887, + 0.828 + ], + "angle": 0, + "content": "(2). Sparse Retrieval. Sparse retrieval methods often rely on term-based matching (e.g., BM25) and apply personalization by assigning higher weights to terms or keywords that are more relevant to the user. OPPU [121] uses the BM25 algorithm to select the k most relevant records from the user's historical data for the current query. Similarly, PAG [101] incorporates user input and profiles to enhance summarization and retrieval, aligning sparse representations with personalization objectives for large language models. Au et al. [5] uses BM25 search algorithms to find entries related to the target user or neighboring users through the graph structure. UniMS-RAG [128] combines sparse and dense retrieval by leveraging multi-source knowledge, such as dialogue context and user images, to refine personalized responses in dialogue systems. Lastly, Deng et al. 
[29] apply sparse retrieval to support fact-based queries, considering user queries and preferences to enhance answer generation for e-commerce applications." + }, + { + "type": "page_number", + "bbox": [ + 0.525, + 0.832, + 0.535, + 0.841 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.122, + 0.823, + 0.309 + ], + "angle": 0, + "content": "(3). Prompt-based Retrieval. Prompt-based retrieval leverages prompts to guide retrieval from the model or external sources and introduces personalization by crafting user-specific prompts that guide the retrieval process. These prompts may include explicit user preferences, historical interactions, or detailed instructions that reflect the user's unique requirements. By embedding this personalized context directly into the prompt, the retrieval process can dynamically adjust to capture and return results that are most relevant to the user. LAPS [50] focuses on multi-session conversational search by storing user preferences and dialogue context, then using prompts to retrieve relevant information tailored to the user's biases and categories of interest. UniMP [140] employs user interaction histories as input to prompt-based retrieval, enabling personalized recommendations for multi-modal tasks, such as vision-language applications, by aligning prompts with user behavioral data. In contrast, Shen et al. [111] explores the use of LLMs to extract empathy and narrative styles from user-provided stories, but this work primarily focuses on style extraction and does not explicitly involve a retrieval component." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.322, + 0.824, + 0.458 + ], + "angle": 0, + "content": "(4). Others. Reinforcement learning-based retrieval personalizes the process by optimizing retrieval policies based on user feedback, learning user preferences over time to adjust strategies. Salemi et al. [103] combines models like BM25, RbR, and dense retrieval, refining them with reinforcement learning (RL) and knowledge distillation (KD) to adapt to user profiles for personalized outputs. Parameter-based retrieval leverages pre-trained model parameters to implicitly store and retrieve user-specific information, allowing direct retrieval from the model without traditional indices. PersonalTM [65] generates document identifiers (Document IDs) using a Transformer model, encoding query, history, and document relationships into its parameters for personalization. Similarly, Zhang et al. [165] uses parameterized representations to integrate user queries and histories, tailoring responses to individual preferences." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.122, + 0.824, + 0.458 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.471, + 0.821, + 0.52 + ], + "angle": 0, + "content": "4.2.4 Post-retrieval. Current Post-Retrieval methods primarily focus on refining retrieved documents or responses to improve relevance and coherence, current methodologies could be categorized into three parts (1) Re-ranking, (2) Summarization, and (3) Compression." 
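Before turning to these post-retrieval steps, the dense-retrieval personalization described in Sec. 4.2.3 can be made concrete with a short sketch: the user's history is folded into the query representation before similarity search. The hashed bag-of-words encoder and the blending weight alpha below are illustrative assumptions standing in for a trained bi-encoder and a learned fusion strategy; the code is a toy, not the method of any cited paper.

```python
# Toy dense retrieval with user-conditioned queries (cf. Sec. 4.2.3): the query
# embedding is blended with an aggregate embedding of the user's history before
# cosine-similarity ranking. The embed() function is a stand-in for a trained
# bi-encoder such as a BERT-style model.
import numpy as np

def embed(text: str, dim: int = 64) -> np.ndarray:
    vec = np.zeros(dim)
    for token in text.lower().split():
        vec[hash(token) % dim] += 1.0   # hashing is consistent within one process
    n = np.linalg.norm(vec)
    return vec / n if n else vec

def personalized_retrieve(query: str, history: list, corpus: list, alpha: float = 0.7, k: int = 2) -> list:
    q = embed(query)
    if history:                          # fold user preferences into the query vector
        u = np.mean([embed(h) for h in history], axis=0)
        q = alpha * q + (1.0 - alpha) * u
    scores = np.stack([embed(d) for d in corpus]) @ q
    return [corpus[i] for i in np.argsort(-scores)[:k]]

corpus = ['lightweight trail running shoes review',
          'formal leather dress shoes guide',
          'marathon training nutrition basics']
history = ['trail running gear', 'ultramarathon pacing']
print(personalized_retrieve('best shoes', history, corpus))
```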
+ }, + { + "type": "text", + "bbox": [ + 0.115, + 0.533, + 0.824, + 0.669 + ], + "angle": 0, + "content": "(1). Re-ranking. Re-ranking enhances personalized content generation by prioritizing more relevant documents at the top. PersonaRAG [156] extends RAG by integrating user-centric agents, such as the Live Session Agent and the Document Ranking Agent, to refine document ranking and improve overall performance. Pavliukevich et al. [89] propose a cross-encoder BERT model for re-ranking external knowledge within a personalized context. UniMS-RAG [128] introduces a scoring mechanism that evaluates retrieved documents and outputs by optimizing the retriever. Besides, it includes an evidence attention mask, enabling re-ranking during inference and applying it to personalized datasets. Salemi and Zamani [106] present an iterative approach to optimizing ranking results based on the expectation-maximization algorithm, with performance validated in personalized scenarios." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.682, + 0.821, + 0.731 + ], + "angle": 0, + "content": "(2). Summarization. Summarization refers to the process of summarizing retrieved information to enhance performance. For instance, Zhang et al. [164] introduced a role-playing agent system to summarize retrieved history in order to improve the final Personalized Opinion Summarization process." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.744, + 0.822, + 0.827 + ], + "angle": 0, + "content": "(3). Compression. Compression involves condensing embeddings or retrieved content to enhance efficiency and effectiveness. Approaches like AutoCompressor [24] compress contextual embeddings into shorter semantic representations, and FIT-RAG [76] introduces a self-knowledge recognizer along with a sub-document-level token reduction mechanism to minimize tokens within RAG pipeline. However, few studies have specifically explored personalized fields, highlighting a promising direction for future research." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.533, + 0.824, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.465, + 0.832, + 0.473, + 0.84 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.88, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.122, + 0.885, + 0.432 + ], + "angle": 0, + "content": "4.2.5 Discussion. Indexing, retrieval, and post-retrieval methods each play a critical role in ensuring efficient and personalized information processing, with specific applications and trade-offs. Indexing focuses on organizing knowledge bases for efficient retrieval, using techniques such as sparse encoding methods like TF-IDF and BM25, which are efficient but limited in understanding semantics, and dense encoding methods like BERT and DeBERTa, which provide better semantic understanding but require significant computational resources. These methods are widely used in tasks like question answering and personalized recommendation systems. 
Retrieval involves matching user queries with relevant documents and can be categorized into dense retrieval, which provides high semantic understanding and personalization but is computationally expensive; sparse retrieval, which is efficient and interpretable but less capable of handling semantics; prompt-based retrieval, which is highly flexible and adaptable to user needs but requires careful engineering of prompts; and advanced methods like reinforcement learning-based approaches, which dynamically adapt to user feedback but are complex to implement. This step is essential in applications like personalized dialogue systems, search engines, and e-commerce. Post-retrieval methods refine retrieved results to enhance relevance and coherence through re-ranking, which improves personalization and prioritizes relevant content but increases computational overhead; summarization, which simplifies complex information for better user understanding but risks losing critical details; and compression, which reduces computational costs by condensing information but remains underexplored in personalized contexts. Together, these methods provide a comprehensive pipeline for delivering efficient, relevant, and personalized outputs, balancing their strengths in semantic understanding, relevance, and flexibility with challenges related to computational costs and implementation complexity." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.448, + 0.29, + 0.459 + ], + "angle": 0, + "content": "4.3 Generation" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.468, + 0.884, + 0.518 + ], + "angle": 0, + "content": "4.3.1 Definition. Personalized generation incorporates user-specific retrieved documents \\( D^{*} \\), task-specific prompt \\( \\text{prompt} \\), and user preference information \\( p \\) via the generator \\( \\mathcal{G} \\) parameterized by \\( \\theta \\) to produce tailored content \\( g^{*} \\) aligned with individual preferences, where the flow is shown in Figure 4. The generation process can be formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.448, + 0.527, + 0.881, + 0.543 + ], + "angle": 0, + "content": "\\[\ng^{*} = \\mathcal{G}(D^{*}, \\text{prompt}, p, \\theta). \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.553, + 0.884, + 0.636 + ], + "angle": 0, + "content": "Personalized generation can be achieved by incorporating explicit and implicit preferences. Explicit preference-driven methodologies utilize direct input signals (e.g., \\( D^{*} \\), \\( \\text{prompt} \\), and \\( p \\)) to tailor outputs to specific user preferences. Conversely, implicit preference-encoded approaches embed personalized information within the parameters \\( \\theta \\) of the generator model during training, thereby facilitating preference alignment without the necessity for explicit runtime inputs." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.648, + 0.884, + 0.75 + ], + "angle": 0, + "content": "4.3.2 Generation from Explicit Preferences. Integrating explicit preferences into LLMs facilitates personalized content generation. Explicit preference information encompasses user demographic information (e.g., age, occupation, gender, location), user behavior sequences (reflecting historical behavioral patterns), and user historical output texts (capturing writing style and tone preferences). 
The injection of explicit preferences for personalized generation can be categorized into three types: (1) Direct-integrated Prompting, (2) Summary-augmented Prompting, and (3) Adaptive Prompting." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.762, + 0.884, + 0.829 + ], + "angle": 0, + "content": "(1). Direct-integrated Prompting. Integrating user explicit preferences into language models through prompting enables the prediction of users' intent and behavioral patterns, facilitating personalized content generation. For instance, \\(\\mathrm{P}^2\\) [49], Character Profiling [154], and OpinionQA [107] integrate personalized data into LLMs through prompting for role-playing task, thereby aligning the model's responses with specified user profiles. Kang et al. [51] and Liu et al. [67]" + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.841 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.121, + 0.803, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.31, + 0.616, + 0.324 + ], + "angle": 0, + "content": "Fig. 4. Overview of the personalized generation stage." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.35, + 0.823, + 0.435 + ], + "angle": 0, + "content": "integrate interaction histories into LLMs via prompting to predict user rating for candidate items. Cue-CoT [129] employs chain-of-thought reasoning to infer user needs from contextual cues, enabling personalized responses to in-depth dialogue questions. Additionally, TICL [26] proposes a trial-and-error framework that critiques initial LLM-generated responses, derives explanations and integrates these negative examples into prompts to improve personalization alignment." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.447, + 0.824, + 0.601 + ], + "angle": 0, + "content": "(2). Summary-augmented Prompting. Direct integration of personalized information via prompting struggles with ambiguous intent signals: Lengthy interaction histories introduce noise that obscures critical behavioral patterns [69], while sparse behavioral data lacks sufficient context for LLMs to derive meaningful user preferences. To address these issues, recent approaches focus on summarizing user personalized intents and integrating them into prompts. For instance, GPG [158] extracts key user habits and preferences from personal contexts, enabling fine-grained personalization. Similarly, LLMs are employed to generate task-specific summaries of user preferences, enhancing retrieval-augmented personalized generation capabilities [101]. In recommendation systems, ONCE [70], LLMTreeRec [163], and KAR [145] leverage historical user-item interactions to summarize user preferences. Furthermore, Matryoshka [58] generates user preference summaries by dynamically retrieving and synthesizing historical data." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.613, + 0.824, + 0.766 + ], + "angle": 0, + "content": "(3). Adaptive Prompting. Manually designing personalized prompts demands both expert knowledge and significant labor, motivating the development of automated methods for personalized prompt generation. 
For example, Li et al. [57] trains a personalized prompt rewriter via supervised and reinforcement learning. RecGPT [166] and PEPLER-D [59] leverage prompt tuning to generate personalized prompts, enhancing sequential and explainable recommendations, respectively. GRAPA [94] integrates semantic and collaborative signals from user-item interaction graphs with graph neural networks to generate context-aware personalized prompts. SGPT [28] employs prompt tuning to jointly model common and group-specific patterns, bridging generalized and personalized federated learning paradigms. Furthermore, PFCL [152] achieves multi-granularity human preference modeling: coarse-grained prompts distill shared knowledge, while fine-grained prompts adapt to individual user characteristics." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.447, + 0.824, + 0.766 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.779, + 0.824, + 0.828 + ], + "angle": 0, + "content": "4.3.3 Generation from Implicit Preferences. Unlike explicit preference modeling, which captures user preferences through textual input, implicit preference-based methods incorporate personalization through internal parameters. This personalization is achieved either through Parameter-Efficient Fine-tuning (PEFT) techniques, such as LoRA [42]," + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.881, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.122, + 0.884, + 0.155 + ], + "angle": 0, + "content": "or reinforcement learning-based approaches for preference alignment [20, 57]. Based on these strategies, we classify existing methods into two categories: (1) Fine-tuning-Based Methods and (2) Reinforcement Learning-Based Methods." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.167, + 0.885, + 0.32 + ], + "angle": 0, + "content": "(1). Fine-tuning Based Methods. For fine-tuning methods, LoRA is the most widely adopted since it is resource-efficient and enables rapid adaptation without compromising model performance. PLoRA [165] introduces a personalized knowledge integration framework that combines task-specific LoRA with user-specific knowledge. Similarly, LM-P [142] personalizes information via LoRA by incorporating User ID as a personalization factor. MiLP [165] employs Bayesian optimization to determine the optimal personalization injection configuration, including LoRA settings, to effectively capture and utilize user-specific information. OPPU [122] and PER-PCS [120] follow a similar approach, leveraging user history data for fine-tuning LoRA-based personalization. However, PER-PCS differs by incorporating a gating module that selects the appropriate LoRA, enabling fine-grained personalization. Additionally, Review-LLM [91] integrates LoRA for supervised fine-tuning in the task of personalized review generation." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.323, + 0.884, + 0.424 + ], + "angle": 0, + "content": "Beyond LoRA-based approaches, alternative pipelines have been proposed for personalized generation. 
UserIdentifier [78] introduces a user-specific identifier, significantly reducing training costs while enhancing personalized demonstration. UserAdapter [171] proposes user-independent prefix embeddings, leveraging prefix tuning for personalization. Meanwhile, HYDRA [175] achieves implicit personalization by training user-specific headers. Recently, researchers have also explored fine-tuning personalized model on edge devices [90] and collaborative learning between small and large language models to enable more personalized generation [161]." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.438, + 0.884, + 0.695 + ], + "angle": 0, + "content": "(2). Reinforcement Learning Based Methods. Apart from fine-tuning based methods, recent research has explored reinforcement learning based techniques to personalize text generation by aligning outputs with user preferences. P-RLHF [62] has been proposed to jointly learn a user-specific and reward model to enable text generation that aligns with a user's styles or criteria. P-SOUPS [47] models multiple user preferences as a Multi-Objective Reinforcement Learning (MORL) problem, decomposing preferences into multiple dimensions, each trained independently. PAD [20] aligns text generation with human preferences during inference by utilizing token-level personalized rewards to guide the decoding process. REST-PG [104] introduces a framework that trains large language models to reason over personal data during response generation. This approach first generates reasoning paths to enhance the LLM's reasoning ability and then employs Expectation-Maximization Reinforced Self-Training to iteratively refine the model based on its high-reward outputs. Additionally, Salemi et al. [103] incorporate reinforcement learning into the RAG pipeline to improve retrieval accuracy, thereby enhancing the personalization of generated content. Other applications include RewriterSIRI [57], which has been introduced to generate text via RL-based personalized prompt rewriting using API-based LLMs, and Kulkarni et al. [54], who explore the use of reinforcement learning to optimize RAG for improving the relevance and coherence of chatbot responses in specialized domains, ultimately enhancing user satisfaction and engagement." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.709, + 0.884, + 0.827 + ], + "angle": 0, + "content": "4.3.4 Discussion. Personalized generation can be adopted via both explicit and implicit preference injection, yet they exhibit distinct characteristics that make them suitable for different scenarios. In explicit preference-based generation, personalization is clearly defined through user profile descriptions, contextual information, and similar inputs, which are incorporated into generators via prompts. A key advantage of this approach is explainability, as the personalized information is explicitly provided and easily traceable. Despite leveraging provided preferences and internal knowledge, explicit preference injection's personalization is constrained by model capabilities and irrelevant information interference. 
In contrast, implicit preference-based generation internalizes personalized information into" + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.84 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.119, + 0.818, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.502, + 0.677, + 0.516 + ], + "angle": 0, + "content": "Fig. 5. Overview of transition from personalized RAG to personalized agent." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.556, + 0.823, + 0.622 + ], + "angle": 0, + "content": "the generator's parameters through scene-specific personalized data, thereby adapting the model for more fine-grained personalization. However, these methods typically incur substantial training and computational costs, as they require fine-tuning the generator's internal parameters. Therefore, selecting between these approaches should be guided by the specific application scenario and resource constraints." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.643, + 0.285, + 0.657 + ], + "angle": 0, + "content": "4.4 From RAG to Agent" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.664, + 0.823, + 0.749 + ], + "angle": 0, + "content": "4.4.1 Definition. A personalized LLM-based agent is a system designed to dynamically incorporate user context, memory, and external tools or APIs to support highly personalized and goal-oriented interactions [19, 45, 146], and solve problems in a goal-oriented manner [63, 113]. From the previously introduced stages of RAG, we observe that the evolution of personalized RAG reveals a structural convergence with agent architectures. We analyze them from three key perspectives:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.759, + 0.822, + 0.827 + ], + "angle": 0, + "content": "- Personalized Understanding: This phase within the agent parallels the query understanding and rewriting process of RAG as outlined in Section 4.1. However, it extends beyond static semantic parsing by incorporating dynamic user profiling [139] and role modeling [110]. This integration enables the agent to dynamically align interactions with implicit user preferences, facilitating personalized responses and task-specific adaptations [96]." + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.881, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.122, + 0.88, + 0.205 + ], + "angle": 0, + "content": "- Personalized Planning and Execution: This phase in agents mirrors RAG's retrieval process in Section 4.2 yet it advances beyond static document retrieval by incorporating real-time memory management [87] and sophisticated tool and API calling [127]. 
This approach ensures the dynamic alignment of external knowledge with personalized constraints, such as integrating medical history in healthcare agents [2], to deliver context-aware and user-specific outcomes." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.209, + 0.881, + 0.293 + ], + "angle": 0, + "content": "- Personalized Generation: This phase in agents mirrors RAG's generative process in Section 4.3 but transcends static template-based generation by integrating user preference and fact alignment. Agents dynamically enforce user preferences and ensure fact consistency through role-specific mechanisms (e.g., social adaptability in conversational agents [2]), enabling outputs to evolve in harmony with personalized and situational constraints rather than relying solely on predefined generative frameworks." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.122, + 0.881, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.301, + 0.881, + 0.384 + ], + "angle": 0, + "content": "In general we frame agent architectures as \"personalized RAG++\", where persistent memory [137] replaces static indexes, and tool APIs [17] serve as dynamic knowledge connectors, enabling complicated, human-aligned interactions beyond one-shot retrieval, as shown in Figure 5. This progression highlights that as RAG systems incorporate deeper personalization—requiring user-state tracking, adaptive tool usage, and context-aware generation, they inherently adopt agent-like capabilities." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.399, + 0.881, + 0.482 + ], + "angle": 0, + "content": "4.4.2 Personalized Understanding. Personalized understanding refers to an agent's ability to accurately interpret user inputs by integrating user intent recognition and contextual analysis. This process ensures interactions that are both meaningful and contextually appropriate. The rationale behind this classification lies in its capacity to address three core aspects of understanding: recognizing user intent, analyzing context, and leveraging user profiles. Each of these aspects plays a distinct role in improving the agent's performance." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.496, + 0.88, + 0.614 + ], + "angle": 0, + "content": "(1). User-profile Understanding. In user-profile understanding, an agent's personalized ability primarily depends on its capacity to accurately model and understand the user's preferences, context, and intentions. Xu et al. [148] proposes a framework in which LLMs are designed to understand the physical world, thereby facilitating a deeper connection between the agent and its environment, which is essential for accurate task execution. Abbasian et al. [2] further expands this understanding by emphasizing the importance of personalization in health agents, where the user's profile directly influences the behavior and decisions of the agent. This user understanding is foundational to ensuring that the AI agent performs tasks in a way that aligns with individual user needs." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.629, + 0.88, + 0.764 + ], + "angle": 0, + "content": "(2). Role Understanding. In agent's role understanding, the role of the agent within these environments is also crucial. Recent studies focus on enhancing role-playing capabilities within LLMs. Wang et al. 
[139] introduce RoleLLM, a benchmark that aims to elicit and refine the role-playing abilities of LLMs, demonstrating how role understanding influences agent performance in conversational tasks. Similarly, Shao et al. [110] present Character-LLM, a trainable agent framework for role-playing, which tailors its responses based on predefined roles. Wang et al. [134] introduce a method for evaluating personality fidelity in role-playing agents through psychological interviews, aiming to enhance the realism and consistency of AI-driven characters. This role understanding allows for more contextually appropriate interactions, increasing the relevance and utility of AI agents across various applications." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.779, + 0.881, + 0.827 + ], + "angle": 0, + "content": "(3). User-role Joint Understanding. In agent's user-role joint understanding, the intersection of user and role understanding is explored through frameworks that evaluate and enhance the social and personality aspects of LLMs. SocialBench Chen et al. [18] provides a sociality evaluation framework for role-playing agents. Dai et al. [27], and" + }, + { + "type": "list", + "bbox": [ + 0.175, + 0.496, + 0.881, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.841 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.122, + 0.823, + 0.205 + ], + "angle": 0, + "content": "[96] extend this by incorporating multi-modal data and personality-indicative information, respectively, which allows agents to better adapt to both user and role understanding in dynamic environments. Furthermore, Wang et al. [126] offers a perspective on how role and environment understanding can improve user experience. Tu et al. [123] contribute by providing a benchmark specifically for evaluating role-playing agents in the Chinese context, adding a cultural dimension to role understanding. Finally, Neeko [153] further advances role-based interactions." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.221, + 0.824, + 0.306 + ], + "angle": 0, + "content": "4.4.3 Personalized Planning and Execution. Personalized planning and execution refer to the process of designing and implementing strategies or actions that are specifically tailored to an individual's unique context, and goals [44, 87, 114, 159]. It requires agents to dynamically integrate long-term memory, real-time reasoning, and external tool utilization [40, 41, 169], as demonstrated in healthcare decision support [2] and travel planning scenarios [17]. We analyze two fundamental components that enable this personalization in the following." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.32, + 0.824, + 0.473 + ], + "angle": 0, + "content": "(1). Memory Management. Effective memory systems allow agents to integrate users' historical preferences, behavioral patterns, and contextual habits, enhancing their ability to make planning and tailor interactions to user-specific needs [17, 127, 136]. 
The EMG-RAG framework [137] combines editable memory graphs with retrieval-augmented generation to maintain dynamic user profiles, while Park et al. [87] implements memory streams and periodic reflection mechanisms to simulate human-like behavior. In healthcare applications, Abbasian et al. [2] integrates multimodal user data through specialized memory modules to optimize treatment recommendations. For recommendation systems, RecAgent [133] employs hierarchical memory structures to model user interaction patterns across multiple domains. Recent advances like TravelPlanner+ [114] demonstrate how memory-augmented LLMs achieve higher relevance in personalized itinerary generation compared to generic planners." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.488, + 0.824, + 0.642 + ], + "angle": 0, + "content": "(2). Tool and API Calling. The integration of external tools expands agents' capabilities beyond pure linguistic reasoning, enabling agents to interact with users and perform personalized tasks [17, 126, 127, 148, 159]. For instance, VOYAGER [127] establishes a paradigm for lifelong skill acquisition through automatic API curriculum learning and skill library construction. In robotics, Zhang et al. [159] develops a bootstrapping framework where LLMs guide robots in tool-mediated skill discovery, enabling a high success rate in novel object manipulation tasks. The PUMA framework [17] demonstrates how personalized web agents can achieve performance gains in e-commerce tasks through adaptive API orchestration. For mobile interaction, Wang et al. [126] implements few-shot tool learning to handle diverse UI operations with minimal training data. These approaches highlight the importance of tool grounding mechanisms [44] that translate linguistic plans into executable API sequences while maintaining personalization constraints." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.32, + 0.824, + 0.642 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.644, + 0.824, + 0.744 + ], + "angle": 0, + "content": "This synthesis highlights that modern agent systems achieve enhanced personalization through two primary strategies: 1) Memory-augmented architectures, which leverage editable memory graphs [137], reflection mechanisms [87], and hierarchical memory structures [133] to dynamically adapt to user preferences across various domains; and 2) Tool and API integration, which expand agent capabilities by balancing generalization with specialization. Future work may explore improving the contextual relevance and adaptability of memory systems while optimizing real-time tool interaction for seamless task execution." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.76, + 0.824, + 0.827 + ], + "angle": 0, + "content": "4.4.4 Personalized Generation. Based on the foundation of personalized planning and execution mechanisms, which enable agents to adapt strategies to user-specific contexts [44, 159], the next critical concern lies in personalized generation. This capability ensures that generated outputs not only align with factual correctness but also resonate with users' unique preferences, personality traits, and situational needs. 
Personalized generation bridges the gap between" + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.882, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.122, + 0.882, + 0.155 + ], + "angle": 0, + "content": "adaptive reasoning and human-aligned outcomes, allowing agents to produce contextually relevant and emotionally appropriate responses." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.177, + 0.885, + 0.332 + ], + "angle": 0, + "content": "(1). Alignment with User Fact. Alignment with User Fact emphasizes the accuracy, consistency, and factual grounding of personalized responses, ensuring they remain trustworthy across diverse user interactions. This is particularly challenging in personalized agents, where maintaining character authenticity while avoiding hallucinations requires balancing creativity with factual adherence. Recent advances address these challenges through improved training frameworks and evaluation metrics. For instance, Character-LLM [110] integrates memory-augmented architectures to reduce hallucinations while preserving character-specific traits. Wang et al. [135] investigate quantization effects on personality consistency in edge-deployed agents and stabilize outputs under computational constraints. Dai et al. [27] ensures multimodal consistency (text-image) in role-playing. These works highlight the importance of architectural innovations and rigorous evaluation in achieving reliability." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.354, + 0.887, + 0.44 + ], + "angle": 0, + "content": "(2). Alignment with User Preferences. Alignment with user preferences ensures that generated outputs reflect individualized personalities, values, and interaction styles. This requires agents to dynamically interpret implicit user cues and adapt responses accordingly. Wang et al. [139] benchmarks role-specific alignment. Ran et al. [96] improves personality fidelity via psychological scale datasets. Wang et al. [134] quantifies alignment via psychological interviews. Chen et al. [18] evaluates social adaptability in conversations." + }, + { + "type": "list", + "bbox": [ + 0.175, + 0.177, + 0.887, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.462, + 0.882, + 0.493 + ], + "angle": 0, + "content": "4.4.5 Discussion. The architectural evolution from RAG to personalized agents introduces significant advancements in human-AI interaction but also surfaces critical challenges that warrant further investigation." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.496, + 0.885, + 0.614 + ], + "angle": 0, + "content": "Personalized Understanding, while enabling interpretation of user intent and context, faces limitations in real-time adaptability and generalization. Current approaches like RoleLLM [139] and Character-LLM [110] demonstrate robust role-specific comprehension but struggle with dynamic user state tracking, particularly when handling evolving preferences or multi-session interactions. 
Furthermore, cultural specificity in benchmarks like CharacterEval [123] reveals gaps in global applicability, as agents trained on region-specific data often fail to generalize across diverse sociocultural contexts. Future work could explore hybrid architectures that combine continuous learning mechanisms with privacy-preserving federated learning to address these adaptability constraints while maintaining user trust." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.618, + 0.885, + 0.737 + ], + "angle": 0, + "content": "Personalized Planning and Execution, achieves remarkable task specialization through memory management and tool integration, yet suffers from scalability issues in complex environments. While frameworks like EMG-RAG [137] and VOYAGER [127] effectively manage user-specific constraints, their reliance on predefined API taxonomies limits emergent tool discovery in novel scenarios. The \"cold-start\" problem persists in domains requiring rapid skill acquisition, as seen in healthcare applications [2], where delayed API responses can compromise decision-making efficacy. A promising direction involves developing meta-reasoning architectures that dynamically prioritize memory recall versus tool invocation based on situational urgency and confidence thresholds." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.739, + 0.885, + 0.824 + ], + "angle": 0, + "content": "Personalized Generation balances factual accuracy with preference alignment but risks over-fitting, where excessive finetuning to user profiles may reinforce cognitive biases. Techniques address surface-level alignment but lack mechanisms for ethical boundary detection. For instance, agents might inadvertently propagate harmful stereotypes when mirroring user preferences without critical oversight. Future systems could integrate value-aligned reinforcement learning with human-in-the-loop validation to preserve authenticity while preventing detrimental customization." + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.841 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.117, + 0.091, + 0.347, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.82, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "table_caption", + "bbox": [ + 0.297, + 0.119, + 0.64, + 0.132 + ], + "angle": 0, + "content": "Table 2. Datasets and metrics for personalized RAG and Agent." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.139, + 0.825, + 0.489 + ], + "angle": 0, + "content": "
Field | Metrics Category | Metrics | Datasets
Pre-retrieval | Textual Quality | BLEU, ROUGE, EM | Avocado Research Email Collection [57, 85], Amazon review [57, 83], Reddit comments [57, 118], Amazon ESCI dataset [82, 97], PIP
Pre-retrieval | Information Retrieval | MAP, MRR, NDCG, Precision, Recall, RBP | AOL [88, 174], WARRIORS [99], Personalized Results Re-Ranking benchmark [6], delicio.us [9, 15, 144, 172], Flickr [9, 108], CiteULike [10, 14], LRDP [12], Delicious [141], Bibsonomy [79], Wikipedia [8, 33]
Pre-retrieval | Classification | Accuracy, Macro-F1 | SCAN [56, 173], AITA WORKSM [53, 80], Robust04 [61]
Pre-retrieval | Others | XEntropy, PMS, Image-Align, PQEC, ProfOverlap | Amazon ESCI dataset [82, 97], PIP, Bibsonomy [79]
Retrieval | Textual Quality | BLEU, ROUGE, Dis, PPL | TOPDIAL [130], Pchatbot [93], DuLemon [150]
Retrieval | Information Retrieval | Recall, MRR, Precision, F1 | LiveChat [34], Pchatbot [93], DuLemon [150]
Retrieval | Classification | Accuracy, Succ | TOPDIAL [130], PersonalityEvid [119], DuLemon [150], PersonalityEdit [75]
Retrieval | Others | Fluency, Coherence, Plausibility, ES, DD, TPEI, PAE | PersonalityEvid [119], PersonalityEdit [75]
Generation | Textual Quality | BLEU, ROUGE, Dis, PPL, METEOR | LaMP [105], Long LaMP [55], Dulemon [150], PGraphRAG [5], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162]
Generation | Classification | Accuracy, F1, Persona F1 | LaMP [105], Long LaMP [55], Dulemon [150], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162]
Generation | Regression | MAE, RMSE | LaMP [105], Long LaMP [55], PGraphRAG [5]
Generation | Others | Fluency, Mean Success Rate, Median Relative Improvements | Personalized-Gen [3]
Agent | Textual Quality | BLEU, ROUGE, METEOR, CIDEr, EM, Fluency, Coherence, Instruction Adherence, Consistency-related metrics | RICO [126], RoleBench [139], Shao et al. [110], Socialbench [18], MMRole-Data [27], ROLEPERSONALITY [96], ChatHarui [134], Character-LLM-Data [153], Knowledge Behind Persona [41], Wang et al. [137], Wang et al. [135], Zheng et al. [169]
Agent | Information Retrieval | Recall, F1, Precision | Knowledge Behind Persona [41]
Agent | Classification | Accuracy, Failure Rate, Classification Accuracy, Preference Rate, Correctness | MIT-BIH Arrhythmia Database [148], VirtualHome [44], Socialbench [18], ARC [100], AGIEval [100], HellaSwag [100], MedMCQA [100], AQUA-RAT [100], LogiQA [100], LSAT-AR [100], LSAT-LR [100], LSAT-RC [100], SAT-English [100], SAT-Math [100], PersonalWAB [17], TravelPlanner+ [114]
Agent | Others | Pass@k, Executability, Productivity, Plausibility of the Story | Hong et al. [40], Zheng et al. [169]
" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.511, + 0.339, + 0.523 + ], + "angle": 0, + "content": "5 EVALUATION AND DATASET" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.532, + 0.823, + 0.72 + ], + "angle": 0, + "content": "In the evolving landscape of personalization, from RAG to advanced Agent-based systems, the evaluation of models relies heavily on diverse datasets and metrics tailored to specific tasks. This survey categorizes metrics into several key types: Textual Quality metrics (e.g., BLEU, ROUGE, METEOR) assess the fluency and coherence of generated outputs; Information Retrieval metrics (e.g., MAP, MRR, Recall) evaluate the accuracy and relevance of retrieved information; Classification metrics (e.g., Accuracy, F1) measure task-specific correctness; Regression metrics (e.g., MAE, RMSE) quantify prediction errors; and Other metrics (e.g., Fluency, Pass@k) address domain-specific or task-unique aspects like plausibility or executability. These metrics span pre-retrieval, retrieval, generation, and agent-based personalization approaches, reflecting their varied objectives. To provide a comprehensive overview, we compile an extensive list of datasets across these fields, as detailed in Table 2. These datasets, paired with their respective metrics, enable researchers to benchmark and refine personalized systems, from enhancing query rewriting to enabling autonomous agents in physical and virtual environments." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.736, + 0.431, + 0.749 + ], + "angle": 0, + "content": "6 CHALLENGES AND FUTURE DIRECTIONS" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.757, + 0.82, + 0.789 + ], + "angle": 0, + "content": "Personalized RAG and agent-based systems still face several critical challenges that warrant further exploration. We list them as follows:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.796, + 0.82, + 0.827 + ], + "angle": 0, + "content": "- Balancing Personalization and Scalability: Integrating personalization data (such as preferences, history, and contextual signals) into RAG processes often increases computational complexity, making it difficult to maintain" + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.88, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.122, + 0.882, + 0.154 + ], + "angle": 0, + "content": "efficiency and scalability across large-scale systems. Future work could explore lightweight, adaptive embeddings and hybrid frameworks that seamlessly fuse user profiles with real-time contexts." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.157, + 0.885, + 0.224 + ], + "angle": 0, + "content": "- Evaluating Personalization Effectively: Current metrics like BLEU, ROUGE, and human evaluations fall short in capturing the nuanced alignment of outputs with dynamic user preferences, lacking tailored measures for personalization efficacy. Developing specialized benchmarks and metrics that assess long-term user satisfaction and adaptability is crucial for real-world applicability." 
+ }, + { + "type": "text", + "bbox": [ + 0.176, + 0.226, + 0.884, + 0.31 + ], + "angle": 0, + "content": "- Preserving Privacy through Device-Cloud Collaboration: Personalized retrieval often involves processing sensitive user data, raising privacy concerns, especially with the increased global emphasis on data protection regulations, such as the European Union's General Data Protection Regulation (GDPR). Consequently, a promising approach is the collaborative integration of on-device small Language models which handle sensitive personal data locally, with cloud-based LLM, which provides broader contextual knowledge." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.312, + 0.884, + 0.395 + ], + "angle": 0, + "content": "- Personalized Agent Planning: Current research on agent planning remains mainly in its early stages, with much of the work focusing on building foundational frameworks such as GUI agents [81] and the application of agents across diverse domains [131]. Notably, the incorporation of personalized approaches has yet to be widely adopted. Exploring how to integrate personalized support into existing frameworks to enhance user experience represents a promising and valuable direction for future investigation." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.399, + 0.884, + 0.449 + ], + "angle": 0, + "content": "- Ensuring Ethical and Coherent Systems: Bias in data processing, privacy concerns in user profiling, and coherence across retrieval and generation stages remain unresolved. Future directions should prioritize ethical safeguards, privacy-preserving techniques, and cross-stage optimization to build trustworthy, unified personalized systems." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.157, + 0.885, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.177, + 0.469, + 0.303, + 0.482 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.49, + 0.884, + 0.609 + ], + "angle": 0, + "content": "In this paper, we explore the landscape of personalization from Retrieval-Augmented Generation (RAG) to advanced LLM-based Agents, detailing adaptations across pre-retrieval, retrieval, and generation stages while extending into agentic capabilities. By reviewing recent literature, datasets, and metrics, we highlight the progress and diversity in enhancing user satisfaction through tailored AI systems. However, challenges such as scalability, effective evaluation, and ethical concerns underscore the need for innovative solutions. Future research should focus on lightweight frameworks, specialized benchmarks, and privacy-preserving techniques to advance personalized AI. Relevant papers and resources are also compiled online for ease of future research." + }, + { + "type": "title", + "bbox": [ + 0.177, + 0.631, + 0.274, + 0.643 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.187, + 0.649, + 0.804, + 0.661 + ], + "angle": 0, + "content": "[1] 2021. BERT: a review of applications in natural language processing and understanding. arXiv preprint arXiv:2103.11943 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.187, + 0.662, + 0.882, + 0.685 + ], + "angle": 0, + "content": "[2] Mahyar Abbasian, Iman Azimi, Amir M Rahmani, and Ramesh Jain. 2023. Conversational health agents: A personalized llm-powered agent framework. arXiv preprint arXiv:2310.02374 (2023)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.187, + 0.687, + 0.881, + 0.711 + ], + "angle": 0, + "content": "[3] Bashar Alhafni, Vivek Kulkarni, Dhruv Kumar, and Vipul Raheja. 2024. Personalized Text Generation with Fine-Grained Linguistic Control. In Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024). 88–101." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.712, + 0.698, + 0.724 + ], + "angle": 0, + "content": "[4] Amazon. [n.d.]. Amazon Customer Review Dataset. Online dataset. https://nijianmo.github.io/amazon/" + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.726, + 0.883, + 0.76 + ], + "angle": 0, + "content": "[5] Steven Au, Cameron J Dimacali, Ojasmitha Pedirappagari, Namyong Park, Franck Dernoncourt, Yu Wang, Nikos Kanakaris, Hanieh Deilamsalehy, Ryan A Rossi, and Nesreen K Ahmed. 2025. Personalized Graph-Based Retrieval for Large Language Models. arXiv preprint arXiv:2501.02157 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.762, + 0.881, + 0.786 + ], + "angle": 0, + "content": "[6] Elias Bassani, Pranav Kasela, Alessandro Raganato, and Gabriella Pasi. 2022. A multi-domain benchmark for personalized search evaluation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3822-3827." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.788, + 0.881, + 0.811 + ], + "angle": 0, + "content": "[7] Elias Bassani, Nicola Tonellotto, and Gabriella Pasi. 2023. Personalized query expansion with contextual word embeddings. ACM Transactions on Information Systems 42, 2 (2023), 1-35." + }, + { + "type": "ref_text", + "bbox": [ + 0.187, + 0.813, + 0.862, + 0.825 + ], + "angle": 0, + "content": "[8] Oliver Baumann and Mirco Schoenfeld. 2024. PSQE: Personalized Semantic Query Expansion for user-centric query disambiguation. (2024)." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.649, + 0.883, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.841 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.117, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.124, + 0.822, + 0.16 + ], + "angle": 0, + "content": "[9] Matthias Bender, Tom Crecelius, Mouna Kacimi, Sebastian Michel, Thomas Neumann, Josiane Xavier Parreira, Ralf Schenkel, and Gerhard Weikum. 2008. Exploiting social relations for query expansion and result ranking. In 2008 IEEE 24th International Conference on Data Engineering Workshop. IEEE, 501-506." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.161, + 0.823, + 0.186 + ], + "angle": 0, + "content": "[10] Marin Bertier, Rachid Guerraoui, Vincent Leroy, and Anne-Marie Kermarrec. 2009. Toward personalized query expansion. In Proceedings of the Second ACM EuroSys Workshop on Social Network Systems. 7-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.187, + 0.822, + 0.211 + ], + "angle": 0, + "content": "[11] Keping Bi, Qingyao Ai, and W Bruce Croft. 2021. Learning a fine-grained review-based transformer model for personalized product search. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 123-132." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.212, + 0.822, + 0.236 + ], + "angle": 0, + "content": "[12] Claudio Biancalana and Alessandro Micarelli. 2009. Social tagging in query expansion: A new way for personalized web search. In 2009 International Conference on Computational Science and Engineering, Vol. 4. IEEE, 1060-1065." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.237, + 0.461, + 0.249 + ], + "angle": 0, + "content": "[13] Microsoft Bing. [n.d]. Bing Search Engine. https://www.bing.com" + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.25, + 0.822, + 0.274 + ], + "angle": 0, + "content": "[14] Mohamed Reda Bouadjenek, Hakim Hacid, and Mokrane Bouzeghoub. 2019. Personalized social query expansion using social annotations. Transactions on Large-Scale Data-and Knowledge-Centered Systems XL (2019), 1-25." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.275, + 0.822, + 0.311 + ], + "angle": 0, + "content": "[15] Mohamed Reda Bouadjenek, Hakim Hacid, Mokrane Bouzeghoub, and Johann Daigremont. 2011. Personalized social query expansion using social bookmarking systems. In Proceedings of the 34th international ACM SIGIR conference on Research and development in Information Retrieval. 1113-1114." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.312, + 0.795, + 0.324 + ], + "angle": 0, + "content": "[16] Domenico Bulfamante. 2023. Generative enterprise search with extensible knowledge base using ai. Ph.D. Dissertation. Politecnico di Torino." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.326, + 0.822, + 0.349 + ], + "angle": 0, + "content": "[17] Hongru Cai, Yongqi Li, Wenjie Wang, ZHU Fengbin, Xiaoyu Shen, Wenjie Li, and Tat-Seng Chua. [n. d]. Large Language Models Empowered Personalized Web Agents. In THE WEB CONFERENCE 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.35, + 0.822, + 0.374 + ], + "angle": 0, + "content": "[18] Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. 2024. Socialbench: Sociality evaluation of role-playing conversational agents. arXiv preprint arXiv:2403.13679 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.376, + 0.822, + 0.4 + ], + "angle": 0, + "content": "[19] Jiangjie Chen, Xintao Wang, Rui Xu, Siyu Yuan, Yikai Zhang, Wei Shi, Jian Xie, Shuang Li, Ruihan Yang, Tinghui Zhu, et al. 2024. From persona to personalization: A survey on role-playing language agents. arXiv preprint arXiv:2404.18231 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.401, + 0.822, + 0.425 + ], + "angle": 0, + "content": "[20] Ruizhe Chen, Xiaotian Zhang, Meng Luo, Wenhao Chai, and Zuozhu Liu. 2024. Pad: Personalized alignment of llms at decoding-time. arXiv preprint arXiv:2410.04070 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.426, + 0.822, + 0.45 + ], + "angle": 0, + "content": "[21] Weijie Chen, Ting Bai, Jinbo Su, Jian Luan, Wei Liu, and Chuan Shi. 2024. Kg-retriever: Efficient knowledge indexing for retrieval-augmented large language models. arXiv preprint arXiv:2412.05547 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.451, + 0.822, + 0.475 + ], + "angle": 0, + "content": "[22] Zheng Chen, Ziyan Jiang, Fan Yang, Eunah Cho, Xing Fan, Xiaojiang Huang, Yanbin Lu, and Aram Galstyan. 2023. Graph meets LLM: A novel approach to collaborative filtering for robust conversational understanding. arXiv preprint arXiv:2305.14449 (2023)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.476, + 0.822, + 0.5 + ], + "angle": 0, + "content": "[23] Hao Cheng, Shuo Wang, Wensheng Lu, Wei Zhang, Mingyang Zhou, Kezhong Lu, and Hao Liao. 2023. Explainable recommendation with personalized review retrieval and aspect learning. arXiv preprint arXiv:2306.12657 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.501, + 0.822, + 0.525 + ], + "angle": 0, + "content": "[24] Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. 2023. Adapting language models to compress contexts. arXiv preprint arXiv:2305.14788 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.527, + 0.822, + 0.551 + ], + "angle": 0, + "content": "[25] Eunah Cho, Ziyan Jiang, Jie Hao, Zheng Chen, Saurabh Gupta, Xing Fan, and Chenlei Guo. 2021. Personalized search-based query rewrite system for conversational ai. In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI. 179-188." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.552, + 0.822, + 0.576 + ], + "angle": 0, + "content": "[26]Hyundong Cho, Karishma Sharma, Nicolaas Jedema, Leonardo FR Ribeiro, Alessandro Moschitti, Ravi Krishnan, and Jonathan May. 2025. TuningFree Personalized Alignment via Trial-Error-Explain In-Context Learning. arXiv preprint arXiv:2502.08972 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.577, + 0.822, + 0.601 + ], + "angle": 0, + "content": "[27] Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. 2024. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.602, + 0.822, + 0.626 + ], + "angle": 0, + "content": "[28] Wenlong Deng, Christos Thrampoulidis, and Xiaoxiao Li. 2024. Unlocking the potential of prompt-tuning in bridging generalized and personalized federated learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 6087-6097." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.627, + 0.822, + 0.651 + ], + "angle": 0, + "content": "[29] Yang Deng, Yaliang Li, Wenxuan Zhang, Bolin Ding, and Wai Lam. 2022. Toward personalized answer generation in e-commerce via multiperspective preference modeling. ACM Transactions on Information Systems (TOIS) 40, 4 (2022), 1-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.652, + 0.822, + 0.676 + ], + "angle": 0, + "content": "[30] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. 2024. The Faiss library. (2024). arXiv:2401.08281 [cs.LG]" + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.677, + 0.433, + 0.689 + ], + "angle": 0, + "content": "[31] ESPN. [n.d.]. ESPN Sports Statistics Dataset. Online dataset." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.69, + 0.822, + 0.726 + ], + "angle": 0, + "content": "[32] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A survey on rag meeting llms: Towards retrieval-augmented large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6491-6501." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.728, + 0.822, + 0.751 + ], + "angle": 0, + "content": "[33] Byron J Gao, David C Anastasiu, and Xing Jiang. 2010. Utilizing user-input contextual terms for query disambiguation. 
In Coling 2010: Posters. 329-337." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.753, + 0.822, + 0.777 + ], + "angle": 0, + "content": "[34] Jingsheng Gao, Yixin Lian, Ziyi Zhou, Yuzhuo Fu, and Baoyuan Wang. 2023. LiveChat: A large-scale personalized dialogue dataset automatically constructed from live streaming. arXiv preprint arXiv:2306.08401 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.778, + 0.822, + 0.802 + ], + "angle": 0, + "content": "[35] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, Haofen Wang, and Haofen Wang. 2023. Retrievalaugmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 2 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.803, + 0.411, + 0.815 + ], + "angle": 0, + "content": "[36] Google. [n.d.]. Google Search. https://www.google.com" + }, + { + "type": "list", + "bbox": [ + 0.124, + 0.124, + 0.823, + 0.815 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.476, + 0.841 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.881, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.124, + 0.882, + 0.16 + ], + "angle": 0, + "content": "[37] Jia-Chen Gu, Hui Liu, Zhen-Hua Ling, Quan Liu, Zhigang Chen, and Xiaodan Zhu. 2021. Partner matters! an empirical study on fusing personas for personalized response selection in retrieval-based chatbots. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 565-574." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.161, + 0.882, + 0.198 + ], + "angle": 0, + "content": "[38] Jie Hao, Yang Liu, Xing Fan, Saurabh Gupta, Saleh Soltan, Rakesh Chada, Pradeep Natarajan, Chenlei Guo, and Gokhan Tur. 2022. CGF: Constrained generation framework for query rewriting in conversational AI. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track. 475-483." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.199, + 0.882, + 0.223 + ], + "angle": 0, + "content": "[39] Nicola Henze, Peter Dolog, and Wolfgang Nejdl. 2004. Reasoning and ontologies for personalized e-learning in the semantic web. Journal of Educational Technology & Society 7, 4 (2004), 82-97." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.224, + 0.882, + 0.249 + ], + "angle": 0, + "content": "[40] Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. 2023. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 3, 4 (2023), 6." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.25, + 0.882, + 0.286 + ], + "angle": 0, + "content": "[41] WANG Hongru, Minda Hu, Yang Deng, Rui Wang, Fei Mi, Weichao Wang, Yasheng Wang, Wai-Chung Kwan, Irwin King, and Kam-Fai Wong. [n. d]. Large Language Models as Source Planner for Personalized Knowledge-grounded Dialogues. In The 2023 Conference on Empirical Methods in Natural Language Processing." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.287, + 0.882, + 0.312 + ], + "angle": 0, + "content": "[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. 2022. Lora: Low-rank adaptation of large language models. ICLR 1, 2 (2022), 3." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.313, + 0.882, + 0.337 + ], + "angle": 0, + "content": "[43] Qiushi Huang, Shuai Fu, Xubo Liu, Wenwu Wang, Tom Ko, Yu Zhang, and Lilian Tang. 2024. Learning retrieval augmentation for personalized dialogue generation. arXiv preprint arXiv:2406.18847 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.338, + 0.882, + 0.362 + ], + "angle": 0, + "content": "[44] Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. 2022. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International conference on machine learning. PMLR, 9118-9147." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.363, + 0.882, + 0.387 + ], + "angle": 0, + "content": "[45] Xu Huang, Weiwen Liu, Xiaolong Chen, Xingmei Wang, Hao Wang, Defu Lian, Yasheng Wang, Ruiming Tang, and Enhong Chen. 2024. Understanding the planning of LLM agents: A survey. arXiv preprint arXiv:2402.02716 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.388, + 0.882, + 0.412 + ], + "angle": 0, + "content": "[46] Rolf Jagerman, Honglei Zhuang, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2023. Query expansion by prompting large language models. arXiv preprint arXiv:2305.03653 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.413, + 0.882, + 0.449 + ], + "angle": 0, + "content": "[47] Joel Jang, Seungone Kim, Bill Yuchen Lin, Yizhong Wang, Jack Hessel, Luke Zettlemoyer, Hannaneh Hajishirzi, Yejin Choi, and Prithviraj Ammanabrolu. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.45, + 0.882, + 0.488 + ], + "angle": 0, + "content": "[48] Pengyue Jia, Yiding Liu, Xiangyu Zhao, Xiaopeng Li, Changying Hao, Shuaiqiang Wang, and Dawei Yin. 2024. MILL: Mutual Verification with Large Language Models for Zero-Shot Query Expansion. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 2498-2518." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.489, + 0.882, + 0.513 + ], + "angle": 0, + "content": "[49] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2023. Evaluating and inducing personality in pre-trained language models. Advances in Neural Information Processing Systems 36 (2023), 10622-10643." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.514, + 0.882, + 0.55 + ], + "angle": 0, + "content": "[50] Hideaki Joko, Shubham Chatterjee, Andrew Ramsay, Arjen P De Vries, Jeff Dalton, and Faegheh Hasibi. 2024. Doing personal laps: Llm-augmented dialogue construction for personalized multi-session conversational search. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 796-806." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.551, + 0.882, + 0.576 + ], + "angle": 0, + "content": "[51] Wang-Cheng Kang, Jianmo Ni, Nikhil Mehta, Maheswaran Sathiamoorthy, Lichan Hong, Ed Chi, and Derek Zhiyuan Cheng. 2023. 
Do llms understand user preferences? evaluating llms on user rating prediction. arXiv preprint arXiv:2305.06474 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.577, + 0.882, + 0.601 + ], + "angle": 0, + "content": "[52] Manojkumar Rangasamy Kannadasan and Grigor Aslanyan. 2019. Personalized query auto-completion through a lightweight representation of the user context. arXiv preprint arXiv:1905.01386 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.602, + 0.882, + 0.651 + ], + "angle": 0, + "content": "[53] Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Greg Corrado, Laszlo Lukacs, Marina Ganea, Peter Young, and Vivek Ramavajjala. 2016. Smart Reply: Automated Response Suggestion for Email. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (San Francisco, California, USA) (KDD '16). Association for Computing Machinery, New York, NY, USA, 955-964. https://doi.org/10.1145/2939672.2939801" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.652, + 0.882, + 0.676 + ], + "angle": 0, + "content": "[54] Mandar Kulkarni, Praveen Tangarajan, Kyung Kim, and Anusua Trivedi. 2024. Reinforcement learning for optimizing rag for domain chatbots. arXiv preprint arXiv:2401.06800 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.677, + 0.882, + 0.702 + ], + "angle": 0, + "content": "[55] Ishita Kumar, Snigdha Viswanathan, Sushrita Yerra, Alireza Salemi, Ryan A Rossi, Franck Dernoncourt, Hanieh Deilamsalehy, Xiang Chen, Ruiyi Zhang, Shubham Agarwal, et al. 2024. Longlamp: A benchmark for personalized long-form text generation. arXiv preprint arXiv:2407.11016 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.703, + 0.882, + 0.727 + ], + "angle": 0, + "content": "[56] Brenden Lake and Marco Baroni. 2018. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In International conference on machine learning. PMLR, 2873-2882." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.728, + 0.882, + 0.752 + ], + "angle": 0, + "content": "[57] Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, and Michael Bendersky. 2024. Learning to rewrite prompts for personalized text generation. In Proceedings of the ACM Web Conference 2024. 3367-3378." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.753, + 0.882, + 0.777 + ], + "angle": 0, + "content": "[58] Changhao Li, Yuchen Zhuang, Rushi Qiang, Haotian Sun, Hanjun Dai, Chao Zhang, and Bo Dai. 2024. Matryoshka: Learning to Drive Black-Box LLMs with LLMs. arXiv preprint arXiv:2410.20749 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.778, + 0.882, + 0.802 + ], + "angle": 0, + "content": "[59] Lei Li, Yongfeng Zhang, and Li Chen. 2023. Personalized prompt learning for explainable recommendation. ACM Transactions on Information Systems 41, 4 (2023), 1-26." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.803, + 0.882, + 0.828 + ], + "angle": 0, + "content": "[60] Sen Li, Fuyu Lv, Taiwei Jin, Guiyang Li, Yukun Zheng, Tao Zhuang, Qingwen Liu, Xiaoyi Zeng, James Kwok, and Qianli Ma. 2022. Query rewriting in taobao search. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3262-3271." 
+ }, + { + "type": "list", + "bbox": [ + 0.183, + 0.124, + 0.882, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.535, + 0.841 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.117, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.124, + 0.822, + 0.148 + ], + "angle": 0, + "content": "[61] Xiaopeng Li, Lixin Su, Pengyue Jia, Xiangyu Zhao, Suqi Cheng, Junfeng Wang, and Dawei Yin. 2023. Agent4ranking: Semantic robust ranking via personalized query rewriting using multi-agent llm. arXiv preprint arXiv:2312.15450 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.149, + 0.822, + 0.173 + ], + "angle": 0, + "content": "[62] Xinyu Li, Ruiyang Zhou, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.175, + 0.821, + 0.199 + ], + "angle": 0, + "content": "[63] Yuanchun Li, Hao Wen, Weijun Wang, Xiangyu Li, Yizhen Yuan, Guohong Liu, Jiacheng Liu, Wenxing Xu, Xiang Wang, Yi Sun, et al. 2024. Personal llm agents: Insights and survey about the capability, efficiency and security. arXiv preprint arXiv:2401.05459 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.2, + 0.821, + 0.224 + ], + "angle": 0, + "content": "[64] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. 2023. Towards general text embeddings with multi-stage contrastive learning. arXiv preprint arXiv:2308.03281 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.225, + 0.822, + 0.26 + ], + "angle": 0, + "content": "[65] Ruixue Lian, Sixing Lu, Clint Solomon, Gustavo Aguilar, Pragaash Ponnusamy, Jialong Han, Chengyuan Ma, and Chenlei Guo. 2023. PersonalTM: Transformer memory for personalized retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2256-2260." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.262, + 0.822, + 0.286 + ], + "angle": 0, + "content": "[66] Shan-Mu Lin and Chuen-Min Huang. 2006. Personalized optimal search in local query expansion. In Proceedings of the 18th Conference on Computational Linguistics and Speech Processing. 221-236." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.288, + 0.822, + 0.311 + ], + "angle": 0, + "content": "[67] Junling Liu, Chao Liu, Peilin Zhou, Renjie Lv, Kang Zhou, and Yan Zhang. 2023. Is chatgpt a good recommender? a preliminary study. arXiv preprint arXiv:2304.10149 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.313, + 0.822, + 0.337 + ], + "angle": 0, + "content": "[68] Jiahong Liu, Zexuan Qiu, Zhongyang Li, Quanyu Dai, Jieming Zhu, Minda Hu, Menglin Yang, and Irwin King. 2025. A Survey of Personalized Large Language Models: Progress and Future Directions. arXiv preprint arXiv:2502.11528 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.338, + 0.822, + 0.362 + ], + "angle": 0, + "content": "[69] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. 2024. Lost in the middle: How language models use long contexts. 
Transactions of the Association for Computational Linguistics 12 (2024), 157-173." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.363, + 0.822, + 0.387 + ], + "angle": 0, + "content": "[70] Qijiong Liu, Nuo Chen, Tetsuya Sakai, and Xiao-Ming Wu. 2024. Once: Boosting content-based recommendation with both open-and closed-source large language models. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 452-461." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.388, + 0.822, + 0.412 + ], + "angle": 0, + "content": "[71] Shuai Liu, Hyundong J Cho, Marjorie Freedman, Xuezhe Ma, and Jonathan May. 2023. RECAP: retrieval-enhanced context-aware prefix encoder for personalized dialogue response generation. arXiv preprint arXiv:2306.07206 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.413, + 0.783, + 0.425 + ], + "angle": 0, + "content": "[72] Tyler Lu and Craig Boutilier. 2011. Budgeted social choice: From consensus to personalized decision making. In *IJCAI*, Vol. 11, 280-286." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.427, + 0.822, + 0.45 + ], + "angle": 0, + "content": "[73] Zhengyi Ma, Zhicheng Dou, Yutao Zhu, Hanxun Zhong, and Ji-Rong Wen. 2021. One chatbot per person: Creating personalized chatbots based on implicit user profiles. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 555-564." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.451, + 0.822, + 0.475 + ], + "angle": 0, + "content": "[74] Aman Madaan, Niket Tandon, Peter Clark, and Yiming Yang. 2022. Memory-assisted prompt editing to improve GPT-3 after deployment. arXiv preprint arXiv:2201.06009 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.476, + 0.822, + 0.5 + ], + "angle": 0, + "content": "[75] Shengyu Mao, Xiaohan Wang, Mengru Wang, Yong Jiang, Pengjun Xie, Fei Huang, and Ningyu Zhang. 2024. Editing Personality for Large Language Models. In CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 241-254." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.502, + 0.822, + 0.525 + ], + "angle": 0, + "content": "[76] Yuren Mao, Xuemei Dong, Wenyi Xu, Yunjun Gao, Bin Wei, and Ying Zhang. 2024. Fit-rag: black-box rag with factual information and token reduction. arXiv preprint arXiv:2403.14374 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.527, + 0.822, + 0.563 + ], + "angle": 0, + "content": "[77] Puneet Mathur, Zhe Liu, Ke Li, Yingyi Ma, Gil Keren, Zeeshan Ahmed, Dinesh Manocha, and Xuedong Zhang. 2023. Personal: Language model personalization via domain-distributed span aggregated k-nearest n-gram retrieval augmentation. In Findings of the Association for Computational Linguistics: EMNLP 2023. 11314-11328." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.565, + 0.822, + 0.588 + ], + "angle": 0, + "content": "[78] Fatemehsadat Mireshghallah, Vaishnavi Shrivastava, Milad Shokouhi, Taylor Berg-Kirkpatrick, Robert Sim, and Dimitrios Dimitriadis. 2021. Identifier: Implicit user representations for simple and effective personalized sentiment analysis. arXiv preprint arXiv:2110.00135 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.59, + 0.822, + 0.613 + ], + "angle": 0, + "content": "[79] Philippe Mulhem, Nawal Ould Amer, and Mathias Gery. 2016. Axiomatic term-based personalized query expansion using bookmarking system. 
In International Conference on Database and Expert Systems Applications. Springer, 235-243." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.615, + 0.822, + 0.65 + ], + "angle": 0, + "content": "[80] Sheshera Mysore, Zhuoran Lu, Mengting Wan, Longqi Yang, Steve Menezes, Tina Baghaee, Emmanuel Barajas Gonzalez, Jennifer Neville, and Tara Safavi. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.653, + 0.822, + 0.676 + ], + "angle": 0, + "content": "[81] Dang Nguyen, Jian Chen, Yu Wang, Gang Wu, Namyong Park, Zhengmian Hu, Hanjia Lyu, Junda Wu, Ryan Aponte, Yu Xia, et al. 2024. Gui agents: A survey. arXiv preprint arXiv:2412.13501 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.677, + 0.822, + 0.701 + ], + "angle": 0, + "content": "[82] Duy A Nguyen, Rishi Kesav Mohan, Van Yang, Pritom Saha Akash, and Kevin Chen-Chuan Chang. 2025. RL-based Query Rewriting with Distilled LLM for online E-Commerce Systems. arXiv preprint arXiv:2501.18056 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.703, + 0.822, + 0.739 + ], + "angle": 0, + "content": "[83] Jianmo Ni, Jiacheng Li, and Julian McAuley. 2019. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP). 188-197." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.741, + 0.822, + 0.765 + ], + "angle": 0, + "content": "[84] Lin Ning, Luyang Liu, Jiaxing Wu, Neo Wu, Devora Berlowitz, Sushant Prakash, Bradley Green, Shawn O'Banion, and Jun Xie. 2024. User-llm: Efficient llm contextualization with user embeddings. arXiv preprint arXiv:2402.13598 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.766, + 0.822, + 0.789 + ], + "angle": 0, + "content": "[85] Douglas Oard, William Webber, David Kirsch, and Sergey Golitsynski. 2015. Avocado research email collection. Philadelphia: Linguistic Data Consortium (2015)." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.79, + 0.74, + 0.802 + ], + "angle": 0, + "content": "[86] U.S. National Library of Medicine. [n.d.]. PubMed: A Free Resource for Biomedical Literature. https://pubmed.ncbi.nlm.nih.gov/" + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.804, + 0.822, + 0.828 + ], + "angle": 0, + "content": "[87] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22." + }, + { + "type": "list", + "bbox": [ + 0.123, + 0.124, + 0.822, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.881, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.184, + 0.124, + 0.882, + 0.148 + ], + "angle": 0, + "content": "[88] Greg Pass, Abdur Chowdhury, and Cayley Torgeson. 2006. 
A picture of search. In Proceedings of the 1st International Conference on Scalable Information Systems (Hong Kong) (InfoScale '06). Association for Computing Machinery, New York, NY, USA, 1-es. https://doi.org/10.1145/1146847.1146848" + }, + { + "type": "ref_text", + "bbox": [ + 0.184, + 0.149, + 0.882, + 0.174 + ], + "angle": 0, + "content": "[89] Vadim Igorevich Pavliukevich, Alina Khasanovna Zherdeva, Olesya Vladimirovna Makhnytkina, and Dmitriy Viktorovich Dyrmovskiy. [n. d.]. Improving RAG with LoRA finetuning for persona text generation. ([n. d.])." + }, + { + "type": "ref_text", + "bbox": [ + 0.184, + 0.174, + 0.878, + 0.186 + ], + "angle": 0, + "content": "[90] Dan Peng, Zhihui Fu, and Jun Wang. 2024. Pocketllm: Enabling on-device fine-tuning for personalized llms. arXiv preprint arXiv:2407.01031 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.187, + 0.881, + 0.211 + ], + "angle": 0, + "content": "[91] Qiyao Peng, Hongtao Liu, Hongyan Xu, Qing Yang, Minglai Shao, and Wenjun Wang. 2024. Review-LLM: Harnessing Large Language Models for Personalized Review Generation. arXiv:2407.07487 [cs.CL] https://arxiv.org/abs/2407.07487" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.212, + 0.881, + 0.237 + ], + "angle": 0, + "content": "[92] Hongjin Qian, Zhicheng Dou, Yutao Zhu, Yueyuan Ma, and Ji-Rong Wen. 2021. Learning implicit user profile for personalized retrieval-based chatbot. In proceedings of the 30th ACM international conference on Information & Knowledge Management. 1467-1477." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.238, + 0.881, + 0.273 + ], + "angle": 0, + "content": "[93] Hongjin Qian, Xiahe Li, Hanxun Zhong, Yu Guo, Yueyuan Ma, Yutao Zhu, Zhanliang Liu, Zhicheng Dou, and Ji-Rong Wen. 2021. Pchatbot: a large-scale dataset for personalized chatbot. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 2470-2477." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.274, + 0.881, + 0.299 + ], + "angle": 0, + "content": "[94] Xiaoru Qu, Yifan Wang, Zhao Li, and Jun Gao. 2024. Graph-enhanced prompt learning for personalized review generation. Data Science and Engineering 9, 3 (2024), 309-324." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.3, + 0.881, + 0.312 + ], + "angle": 0, + "content": "[95] A. Rajaraman and J.D. Ullman. 2011. Mining of Massive Datasets. Cambridge University Press. https://books.google.co.uk/books?id=OefRhZyYOb0C" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.313, + 0.881, + 0.337 + ], + "angle": 0, + "content": "[96] Yiting Ran, Xintao Wang, Rui Xu, Xinfeng Yuan, Jiaqing Liang, Deqing Yang, and Yanghua Xiao. 2024. Capturing minds, not just words: Enhancing role-playing language models with personality-indicative data. arXiv preprint arXiv:2406.18921 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.338, + 0.881, + 0.362 + ], + "angle": 0, + "content": "[97] Chandan K. Reddy, Lluis Marquez, Fran Valero, Nikhil Rao, Hugo Zaragoza, Sambaran Bandyopadhyay, Arnab Biswas, Anlu Xing, and Karthik Subbian. 2022. Shopping Queries Dataset: A Large-Scale ESCI Benchmark for Improving Product Search. (2022). arXiv:2206.06588" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.363, + 0.878, + 0.375 + ], + "angle": 0, + "content": "[98] Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.376, + 0.881, + 0.4 + ], + "angle": 0, + "content": "[99] Ruiyang Ren, Peng Qiu, Yingqi Qu, Jing Liu, Wayne Xin Zhao, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2024. Bases: Large-scale web search user simulation with large language model based agents. arXiv preprint arXiv:2402.17505 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.401, + 0.881, + 0.424 + ], + "angle": 0, + "content": "[100] Matthew Renze and Erhan Guven. 2024. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.426, + 0.881, + 0.45 + ], + "angle": 0, + "content": "[101] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.451, + 0.881, + 0.475 + ], + "angle": 0, + "content": "[102] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends in Information Retrieval 3, 4 (2009), 333-389." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.476, + 0.881, + 0.5 + ], + "angle": 0, + "content": "[103] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.501, + 0.881, + 0.526 + ], + "angle": 0, + "content": "[104] Alireza Salemi, Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, Tao Chen, Zhuowan Li, Michael Bendersky, and Hamed Zamani. 2025. Reasoning-Enhanced Self-Training for Long-Form Personalized Text Generation. arXiv preprint arXiv:2501.04167 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.527, + 0.881, + 0.551 + ], + "angle": 0, + "content": "[105] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2024. LaMP: When Large Language Models Meet Personalization. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 7370-7392." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.552, + 0.881, + 0.575 + ], + "angle": 0, + "content": "[106] Alireza Salemi and Hamed Zamani. 2024. Learning to Rank for Multiple Retrieval-Augmented Models through Iterative Utility Maximization. arXiv preprint arXiv:2410.09942 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.577, + 0.881, + 0.601 + ], + "angle": 0, + "content": "[107] Shibani Santurkar, Esin Durmus, Faisal Ladhak, Cinoo Lee, Percy Liang, and Tatsunori Hashimoto. 2023. Whose opinions do language models reflect?. In International Conference on Machine Learning. PMLR, 29971-30004." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.602, + 0.881, + 0.626 + ], + "angle": 0, + "content": "[108] Rossano Schifanella, Alain Barrat, Ciro Cattuto, Benjamin Markines, and Filippo Menczer. 2010. Folks in folksonomies: social link prediction from shared metadata. In Proceedings of the third ACM international conference on Web search and data mining. 271-280." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.627, + 0.881, + 0.651 + ], + "angle": 0, + "content": "[109] Noor Shaker, Georgios Yannakakis, and Julian Togelius. 2010. Towards automatic personalized content generation for platform games. In Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment, Vol. 6. 63-68." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.652, + 0.878, + 0.664 + ], + "angle": 0, + "content": "[110] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-llm: A trainable agent for role-playing. arXiv preprint arXiv:2310.10158 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.665, + 0.881, + 0.689 + ], + "angle": 0, + "content": "[111] Jocelyn Shen, Joel Mire, Hae Won Park, Cynthia Breazeal, and Maarten Sap. 2024. HEART-felt Narratives: Tracing Empathy and Narrative Style in Personal Stories with LLMs. arXiv preprint arXiv:2405.17633 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.69, + 0.881, + 0.714 + ], + "angle": 0, + "content": "[112] Yunxiao Shi, Xing Zi, Zijing Shi, Haimin Zhang, Qiang Wu, and Min Xu. 2024. Eragent: Enhancing retrieval-augmented language models with improved accuracy, efficiency, and personalization. arXiv preprint arXiv:2405.06683 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.715, + 0.881, + 0.739 + ], + "angle": 0, + "content": "[113] Aditi Singh, Abul Ehtesham, Saket Kumar, and Tala Talaei Khoei. 2025. Agentic Retrieval-Augmented Generation: A Survey on Agentic RAG. arXiv preprint arXiv:2501.09136 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.741, + 0.881, + 0.776 + ], + "angle": 0, + "content": "[114] Harmanpreet Singh, Nikhil Verma, Yixiao Wang, Manasa Bharadwaj, Homa Fashandi, Kevin Ferreira, and Chul Lee. 2024. Personal Large Language Model Agents: A Case Study on Tailored Travel Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 486-514." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.778, + 0.881, + 0.814 + ], + "angle": 0, + "content": "[115] Shamane Siriwardhana, Rivindu Weerasekera, Elliott Wen, Tharindu Kaluarachchi, Rajib Rana, and Suranga Nanayakkara. 2023. Improving the domain adaptation of retrieval augmented generation (RAG) models for open domain question answering. Transactions of the Association for Computational Linguistics 11 (2023), 1-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.816, + 0.854, + 0.828 + ], + "angle": 0, + "content": "[116] Mingyang Song and Mao Zheng. 2024. A Survey of Query Optimization in Large Language Models. arXiv preprint arXiv:2412.17558 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.124, + 0.882, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.84 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.116, + 0.091, + 0.349, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.124, + 0.644, + 0.136 + ], + "angle": 0, + "content": "[117] Spotify. 2023. Annoy: Approximate Nearest Neighbors in C++/Python. 
https://github.com/spotify/annoy" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.137, + 0.821, + 0.16 + ], + "angle": 0, + "content": "[118] Stuck_In_the Matrix. 2015. Reddit Public Comments (2007-10 through 2015-05). (2015). https://www.reddit.com/r/datasets/comments/3bxlg7/i_have EVERY_publicly-available Reddit_COMMENT/" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.162, + 0.821, + 0.185 + ], + "angle": 0, + "content": "[119] Lei Sun, Jinming Zhao, and Qin Jin. 2024. Revealing Personality Traits: A New Benchmark Dataset for Explanable Personality Recognition on Dialogues. arXiv preprint arXiv:2409.19723 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.187, + 0.821, + 0.211 + ], + "angle": 0, + "content": "[120] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized pieces: Efficient personalized large language models through collaborative efforts. arXiv preprint arXiv:2406.10471 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.213, + 0.821, + 0.236 + ], + "angle": 0, + "content": "[121] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing large language models via personalized parameter-efficient fine-tuning. arXiv preprint arXiv:2402.04401 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.237, + 0.821, + 0.261 + ], + "angle": 0, + "content": "[122] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2025. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.262, + 0.821, + 0.286 + ], + "angle": 0, + "content": "[123] Quan Tu, Shilong Fan, Zihang Tian, and Rui Yan. 2024. Charactereval: A Chinese benchmark for role-playing conversational agent evaluation. arXiv preprint arXiv:2401.01275 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.287, + 0.574, + 0.299 + ], + "angle": 0, + "content": "[124] Cornell University. [n.d.]. arXiv: An Open Access Repository for Research. https://arxiv.org/" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.3, + 0.821, + 0.336 + ], + "angle": 0, + "content": "[125] Hemanth Vemuri, Sheshansh Agrawal, Shivam Mittal, Deepak Saini, Akshay Soni, Abhinav V Sambasivan, Wenhao Lu, Yajun Wang, Mehul Parsana, Purushottam Kar, et al. 2023. Personalized retrieval over millions of items. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1014-1022." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.337, + 0.821, + 0.362 + ], + "angle": 0, + "content": "[126] Bryan Wang, Gang Li, and Yang Li. 2023. Enabling conversational interaction with mobile ui using large language models. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.363, + 0.821, + 0.387 + ], + "angle": 0, + "content": "[127] Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.388, + 0.821, + 0.412 + ], + "angle": 0, + "content": "[128] Hongru Wang, Wenyu Huang, Yang Deng, Rui Wang, Zezhong Wang, Yufei Wang, Fei Mi, Jeff Z Pan, and Kam-Fai Wong. 2024. 
Unims-rag: A unified multi-source retrieval-augmented generation for personalized dialogue systems. arXiv preprint arXiv:2401.13256 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.413, + 0.821, + 0.437 + ], + "angle": 0, + "content": "[129] Hongru Wang, Rui Wang, Fei Mi, Yang Deng, Zezhong Wang, Bin Liang, Ruifeng Xu, and Kam-Fai Wong. 2023. Cue-CoT: Chain-of-thought prompting for responding to in-depth dialogue questions with LLMs. arXiv preprint arXiv:2305.11792 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.438, + 0.821, + 0.462 + ], + "angle": 0, + "content": "[130] Jian Wang, Yi Cheng, Dongding Lin, Chak Tou Leong, and Wenjie Li. 2023. Target-oriented proactive dialogue systems with personalization: Problem formulation and dataset curation. arXiv preprint arXiv:2310.07397 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.463, + 0.821, + 0.487 + ], + "angle": 0, + "content": "[131] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. 2024. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 6 (2024), 186345." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.488, + 0.811, + 0.5 + ], + "angle": 0, + "content": "[132] Liang Wang, Nan Yang, and Furu Wei. 2023. Query2doc: Query expansion with large language models. arXiv preprint arXiv:2303.07678 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.501, + 0.821, + 0.525 + ], + "angle": 0, + "content": "[133] Lei Wang, Jingsen Zhang, Hao Yang, Zhiyuan Chen, Jiakai Tang, Zeyu Zhang, Xu Chen, Yankai Lin, Ruihua Song, Wayne Xin Zhao, et al. 2023. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.527, + 0.821, + 0.551 + ], + "angle": 0, + "content": "[134] Xintao Wang, Yunze Xiao, Jen-tse Huang, Siyu Yuan, Rui Xu, Haoran Guo, Quan Tu, Yaying Fei, Ziang Leng, Wei Wang, et al. 2023. Incharacter: Evaluating personality fidelity in role-playing agents through psychological interviews. arXiv preprint arXiv:2310.17976 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.552, + 0.821, + 0.576 + ], + "angle": 0, + "content": "[135] Yixiao Wang, Homa Fashandi, and Kevin Ferreira. 2024. Investigating the Personality Consistency in Quantized Role-Playing Dialogue Agents. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 239–255." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.577, + 0.821, + 0.601 + ], + "angle": 0, + "content": "[136] Yu Wang, Yifan Gao, Xiusi Chen, Haoming Jiang, Shiyang Li, Jingfeng Yang, Qingyu Yin, Zheng Li, Xian Li, Bing Yin, et al. [n.d.]. MEMORYLLM: Towards Self-Updatable Large Language Models. In Forty-first International Conference on Machine Learning." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.602, + 0.821, + 0.626 + ], + "angle": 0, + "content": "[137] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 4891-4906." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.627, + 0.821, + 0.651 + ], + "angle": 0, + "content": "[138] Zijie J Wang and Duen Horng Chau. 2024. 
MeMemo: On-device Retrieval Augmentation for Private and Personalized Text Generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2765-2770." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.652, + 0.821, + 0.688 + ], + "angle": 0, + "content": "[139] Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. 2023. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.69, + 0.821, + 0.726 + ], + "angle": 0, + "content": "[140] Tianxin Wei, Bowen Jin, Ruirui Li, Hansi Zeng, Zhengyang Wang, Jianhui Sun, Qingyu Yin, Hanqing Lu, Suhang Wang, Jingrui He, et al. 2024. Towards unified multi-modal personalization: Large vision-language models for generative recommendation and beyond. arXiv preprint arXiv:2403.10667 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.728, + 0.821, + 0.752 + ], + "angle": 0, + "content": "[141] Robert Wetzker, Carsten Zimmermann, and Christian Bauchage. 2008. Analyzing social bookmarking systems: A del. icio. us cookbook. In Proceedings of the ECAI 2008 Mining Social Data Workshop. 26-30." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.753, + 0.821, + 0.777 + ], + "angle": 0, + "content": "[142] Stanisław Wozniak, Bartlomiej Koptyra, Arkadiusz Janz, Przemysław Kazienko, and Jan Kocón. 2024. Personalized large language models. arXiv preprint arXiv:2402.09269 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.778, + 0.821, + 0.802 + ], + "angle": 0, + "content": "[143] Junde Wu, Jiayuan Zhu, Yunli Qi, Jingkun Chen, Min Xu, Filippo Menolascina, and Vicente Grau. 2024. Medical graph rag: Towards safe medical large language model via graph retrieval-augmented generation. arXiv preprint arXiv:2408.04187 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.803, + 0.821, + 0.827 + ], + "angle": 0, + "content": "[144] Xuan Wu, Dong Zhou, Yu Xu, and Seamus Lawless. 2017. Personalized query expansion utilizing multi-relational social data. In 2017 12th International Workshop on Semantic and Social Media Adaptation and Personalization (SMAP). IEEE, 65-70." + }, + { + "type": "list", + "bbox": [ + 0.118, + 0.124, + 0.821, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.177, + 0.091, + 0.46, + 0.103 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "header", + "bbox": [ + 0.781, + 0.091, + 0.881, + 0.102 + ], + "angle": 0, + "content": "X. Li and P. Jia, et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.124, + 0.882, + 0.16 + ], + "angle": 0, + "content": "[145] Yunjia Xi, Weiwen Liu, Jianghao Lin, Xiaoling Cai, Hong Zhu, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, and Yong Yu. 2024. Towards open-world recommendation with knowledge augmentation from large language models. In Proceedings of the 18th ACM Conference on Recommender Systems. 12-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.177, + 0.161, + 0.882, + 0.186 + ], + "angle": 0, + "content": "[146] Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, et al. 2025. 
The rise and potential of large language model based agents: A survey. Science China Information Sciences 68, 2 (2025), 121101." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.187, + 0.881, + 0.211 + ], + "angle": 0, + "content": "[147] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th international ACM SIGIR conference on research and development in information retrieval. 641-649." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.212, + 0.881, + 0.237 + ], + "angle": 0, + "content": "[148] Huatao Xu, Liying Han, Qirui Yang, Mo Li, and Mani Srivastava. 2024. Penetrative ai: Making llms comprehend the physical world. In Proceedings of the 25th International Workshop on Mobile Computing Systems and Applications. 1-7." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.238, + 0.881, + 0.261 + ], + "angle": 0, + "content": "[149] Hongyan Xu, Hongtao Liu, Pengfei Jiao, and Wenjun Wang. 2021. Transformer reasoning network for personalized review summarization. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1452-1461." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.262, + 0.881, + 0.286 + ], + "angle": 0, + "content": "[150] Xinchao Xu, Zhibin Gou, Wenquan Wu, Zheng-Yu Niu, Hua Wu, Haifeng Wang, and Shihang Wang. 2022. Long time no see! open-domain conversation with long-term persona memory. arXiv preprint arXiv:2203.05797 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.287, + 0.881, + 0.312 + ], + "angle": 0, + "content": "[151] Yiyan Xu, Jinghao Zhang, Alireza Salemi, Xinting Hu, Wenjie Wang, Fuli Feng, Hamed Zamani, Xiangnan He, and Tat-Seng Chua. 2025. Personalized Generation In Large Model Era: A Survey. arXiv preprint arXiv:2503.02614 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.313, + 0.881, + 0.337 + ], + "angle": 0, + "content": "[152] Hao Yu, Xin Yang, Xin Gao, Yan Kang, Hao Wang, Junbo Zhang, and Tianrui Li. 2024. Personalized federated continual learning via multi-granularity prompt. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4023-4034." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.338, + 0.881, + 0.362 + ], + "angle": 0, + "content": "[153] Xiaoyan Yu, Tongxu Luo, Yifan Wei, Fangyu Lei, Yiming Huang, Hao Peng, and Liehuang Zhu. 2024. Neeko: Leveraging dynamic lora for efficient multi-character role-playing agent. arXiv preprint arXiv:2402.13717 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.363, + 0.881, + 0.387 + ], + "angle": 0, + "content": "[154] Xinfeng Yuan, Siyu Yuan, Yuhan Cui, Tianhe Lin, Xintao Wang, Rui Xu, Jiangjie Chen, and Deqing Yang. 2024. Evaluating character understanding of large language models via character profiling from fictional works. arXiv preprint arXiv:2404.12726 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.388, + 0.881, + 0.412 + ], + "angle": 0, + "content": "[155] Hansi Zeng, Surya Kallumadi, Zaid Alibadi, Rodrigo Nogueira, and Hamed Zamani. 2023. A personalized dense retrieval framework for unified information access. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 121-130." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.413, + 0.881, + 0.437 + ], + "angle": 0, + "content": "[156] Saber Zerhoudi and Michael Granitzer. 2024. 
PersonaRAG: Enhancing Retrieval-Augmented Generation Systems with User-Centric Agents. arXiv preprint arXiv:2407.09394 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.438, + 0.881, + 0.475 + ], + "angle": 0, + "content": "[157] Han Zhang, Songlin Wang, Kang Zhang, Zhiling Tang, Yunjiang Jiang, Yun Xiao, Weipeng Yan, and Wen-Yun Yang. 2020. Towards personalized and semantic retrieval: An end-to-end solution for e-commerce search via embedding learning. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 2407-2416." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.476, + 0.773, + 0.488 + ], + "angle": 0, + "content": "[158] Jiarui Zhang. 2024. Guided profile generation improves personalization with llms. arXiv preprint arXiv:2409.13093 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.489, + 0.881, + 0.513 + ], + "angle": 0, + "content": "[159] Jesse Zhang, Jiahui Zhang, Karl Pertsch, Ziyi Liu, Xiang Ren, Minsuk Chang, Shao-Hua Sun, and Joseph J Lim. [n.d.]. Bootstrap Your Own Skills: Learning to Solve New Tasks with Large Language Model Guidance. In 7th Annual Conference on Robot Learning." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.514, + 0.881, + 0.538 + ], + "angle": 0, + "content": "[160] Kai Zhang, Yangyang Kang, Fubang Zhao, and Xiaozhong Liu. 2023. LLM-based medical assistant personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.539, + 0.881, + 0.563 + ], + "angle": 0, + "content": "[161] Kaiyan Zhang, Jianyu Wang, Ermo Hua, Biqing Qi, Ning Ding, and Bowen Zhou. 2024. Cogenesis: A framework collaborating large and small language models for secure context-aware instruction following. arXiv preprint arXiv:2403.03129 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.565, + 0.881, + 0.588 + ], + "angle": 0, + "content": "[162] Kai Zhang, Fubang Zhao, Yangyang Kang, and Xiaozhong Liu. 2023. Memory-augmented llm personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.589, + 0.881, + 0.626 + ], + "angle": 0, + "content": "[163] Wenlin Zhang, Chuhan Wu, Xiangyang Li, Yuhao Wang, Kuicai Dong, Yichao Wang, Xinyi Dai, Xiangyu Zhao, Huifeng Guo, and Ruiming Tang. 2025. LLMTreeRec: Unleashing the Power of Large Language Models for Cold-Start Recommendations. In Proceedings of the 31st International Conference on Computational Linguistics. 886-896." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.627, + 0.881, + 0.651 + ], + "angle": 0, + "content": "[164] Yanyue Zhang, Yulan He, and Deyu Zhou. 2025. Rehearse With User: Personalized Opinion Summarization via Role-Playing based on Large Language Models. arXiv preprint arXiv:2503.00449 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.652, + 0.881, + 0.676 + ], + "angle": 0, + "content": "[165] You Zhang, Jin Wang, Liang-Chih Yu, Dan Xu, and Xuejie Zhang. 2024. Personalized LoRA for human-centered text understanding. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 1958-19596." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.677, + 0.881, + 0.702 + ], + "angle": 0, + "content": "[166] Yabin Zhang, Wenhui Yu, Erhan Zhang, Xu Chen, Lantao Hu, Peng Jiang, and Kun Gai. 2024. 
Recgpt: Generative personalized prompts for sequential recommendation via chatgpt training paradigm. arXiv preprint arXiv:2404.08675 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.703, + 0.881, + 0.727 + ], + "angle": 0, + "content": "[167] Zeyu Zhang, Xiaohe Bo, Chen Ma, Rui Li, Xu Chen, Quanyu Dai, Jieming Zhu, Zhenhua Dong, and Ji-Rong Wen. 2024. A survey on the memory mechanism of large language model based agents. arXiv preprint arXiv:2404.13501 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.728, + 0.881, + 0.752 + ], + "angle": 0, + "content": "[168] Zhehao Zhang, Ryan A Rossi, Branislav Kveton, Yijia Shao, Diyi Yang, Hamed Zamani, Franck Dernoncourt, Joe Barrow, Tong Yu, Sungchul Kim, et al. 2024. Personalization of large language models: A survey. arXiv preprint arXiv:2411.00027 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.753, + 0.881, + 0.777 + ], + "angle": 0, + "content": "[169] Yi Zheng, Chongyang Ma, Kanle Shi, and Haibin Huang. 2023. Agents meet okr: An object and key results driven agent system with hierarchical self-collaboration and self-evaluation. arXiv preprint arXiv:2311.16542 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.778, + 0.881, + 0.802 + ], + "angle": 0, + "content": "[170] Hanxun Zhong, Zhicheng Dou, Yutao Zhu, Hongjin Qian, and Ji-Rong Wen. 2022. Less is more: Learning to refine dialogue history for personalized dialogue generation. arXiv preprint arXiv:2204.08128 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.178, + 0.803, + 0.881, + 0.827 + ], + "angle": 0, + "content": "[171] Wanjun Zhong, Duyu Tang, Jiahai Wang, Jian Yin, and Nan Duan. 2021. UserAdapter: Few-shot user learning in sentiment analysis. In Findings of the Association for Computational Linguistics: ACL-JJCNLP 2021. 1484-1488." + }, + { + "type": "list", + "bbox": [ + 0.177, + 0.124, + 0.882, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.523, + 0.832, + 0.536, + 0.841 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.117, + 0.091, + 0.348, + 0.103 + ], + "angle": 0, + "content": "A Survey of Personalization: From RAG to Agent" + }, + { + "type": "header", + "bbox": [ + 0.54, + 0.091, + 0.822, + 0.102 + ], + "angle": 0, + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.124, + 0.822, + 0.147 + ], + "angle": 0, + "content": "[172] Dong Zhou, Séamus Lawless, and Vincent Wade. 2012. Improving search via personalized query expansion using social media. Information retrieval 15 (2012), 218-242." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.149, + 0.822, + 0.173 + ], + "angle": 0, + "content": "[173] Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, et al. 2022. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.175, + 0.821, + 0.198 + ], + "angle": 0, + "content": "[174] Yujia Zhou, Qiannan Zhu, Jiajie Jin, and Zhicheng Dou. 2024. Cognitive personalized search integrating large language models with an efficient memory mechanism. In Proceedings of the ACM Web Conference 2024. 1464-1473." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.2, + 0.821, + 0.224 + ], + "angle": 0, + "content": "[175] Yuchen Zhuang, Haotian Sun, Yue Yu, Rushi Qiang, Qifan Wang, Chao Zhang, and Bo Dai. [n.d.]. Hydra: Model factorization framework for black-box llm personalization, 2024. URL https://arxiv.org/abs/2406.02888 ([n.d.])." + }, + { + "type": "list", + "bbox": [ + 0.118, + 0.124, + 0.822, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.239, + 0.504, + 0.251 + ], + "angle": 0, + "content": "Received 20 February 2007; revised 12 March 2009; accepted 5 June 2009" + }, + { + "type": "page_number", + "bbox": [ + 0.463, + 0.832, + 0.475, + 0.841 + ], + "angle": 0, + "content": "25" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_origin.pdf b/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5d39baa179e1cdb0494c8964c6dceadb221fd009 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/26499afc-f1b2-4507-8b62-4adc6ac17e5f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97329609b909a06225fc6f112666706d10e5b3ceb1d8907638249f0b3a618c65 +size 2003619 diff --git a/data/2025/2504_10xxx/2504.10147/full.md b/data/2025/2504_10xxx/2504.10147/full.md new file mode 100644 index 0000000000000000000000000000000000000000..949c90d427bd9450e1cd62d1363fd267071298b2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/full.md @@ -0,0 +1,460 @@ +# A Survey of Personalization: From RAG to Agent + +XIAOPENG LI*, City University of Hong Kong, Hong Kong + +PENGYUE JIA*, City University of Hong Kong, Hong Kong + +DERONG XU, City University of Hong Kong, Hong Kong and University of Science and Technology of China, China + +YI WEN, City University of Hong Kong, Hong Kong + +YINGYI ZHANG, City University of Hong Kong, Hong Kong and Dalian University of Technology, China + +WENLIN ZHANG, City University of Hong Kong, Hong Kong + +WANYU WANG, City University of Hong Kong, Hong Kong + +YICHAO WANG, Noah's Ark Lab, Huawei, China + +ZHAOCHENG DU, Noah's Ark Lab, Huawei, China + +XIANGYANG LI, Noah's Ark Lab, Huawei, China + +YONG LIU, Noah's Ark Lab, Huawei, Singapore + +HUIFENG GUO, Noah's Ark Lab, Huawei, China + +RUIMING TANG†, Noah's Ark Lab, Huawei, China + +XIANGYU ZHAO†, City University of Hong Kong, Hong Kong + +Personalization has become an essential capability in modern AI systems, enabling customized interactions that align with individual user preferences, contexts, and goals. Recent research has increasingly concentrated on Retrieval-Augmented Generation (RAG) frameworks and their evolution into more advanced agent-based architectures within personalized settings to enhance user satisfaction. Building on this foundation, this survey systematically examines personalization across the three core stages of RAG: pre-retrieval, retrieval, and generation. Beyond RAG, we further extend its capabilities into the realm of Personalized LLM-based Agents, which enhance traditional RAG systems with agentic functionalities, including user understanding, personalized planning and execution, and dynamic generation. For both personalization in RAG and agent-based personalization, we provide formal definitions, conduct a comprehensive review of recent literature, and summarize key datasets and evaluation metrics. 
Additionally, we discuss fundamental challenges, limitations, and promising research directions in this evolving field. Relevant papers and resources are continuously updated at the Github Repo1. + +CCS Concepts: $\cdot$ Information systems $\rightarrow$ Personalization. + +Additional Key Words and Phrases: Large Language Model, Retrieval-Augmented Generation, Agent, Personalization + +$^{1}$ https://github.com/Applied-Machine-Learning-Lab/Awesome-Personalized-RAG-Agent +* Equal contribution. +† Corresponding authors. + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. + +© 2018 Copyright held by the owner/author(s). Publication rights licensed to ACM. + +Manuscript submitted to ACM + +# ACM Reference Format: + +Xiaopeng Li*, Pengyue Jia*, Derong Xu, Yi Wen, Yingyi Zhang, Wenlin Zhang, Wanyu Wang, Yichao Wang, Zhaocheng Du, Xiangyang Li, Yong Liu, Huifeng Guo, Ruiming Tang†, and Xiangyu Zhao†. 2018. A Survey of Personalization: From RAG to Agent. In Proceedings of Make sure to enter the correct conference title from your rights confirmation email (Conference acronym 'XX). ACM, New York, NY, USA, 25 pages. https://doi.org/XXXXXXXX.XXXXXXXXXX + +# 1 INTRODUCTION + +Large Language Models (LLMs) have revolutionized AI-driven applications by enabling natural language understanding and generation at an unprecedented scale. However, these models often suffer from issues such as outdated responses and hallucinations, which severely hinder the accuracy of information generation. Retrieval-Augmented Generation (RAG) has emerged as a promising framework that integrates retrieved information from external corpora, such as external APIs [13, 36], scientific repositories [86, 124] or domain-specific databases [4, 31], ensuring more knowledge-grounded and up-to-date outputs. + +Its versatility has led to significant applications across various domains, including question answering [115], enterprise search [16] and healthcare [143], etc. Among these applications, one particularly notable area is in agent workflows, where RAG enhances autonomous systems by providing context-aware, dynamically retrieved, and reliable knowledge. This is because each stage of the RAG process closely mirrors key aspects of an agent's workflow, as shown in Figure 1. For instance, the query rewriting phase in RAG, which involves semantic understanding and parsing, aligns with the semantic comprehension stage in agent workflows. Likewise, RAG's retrieval phase, which focuses on extracting the most relevant documents, corresponds to the planning and execution phases of an agent, where decisions are made based on retrieved knowledge. Finally, the generation phase in RAG parallels an agent's execution stage, where actions are performed based on the given task. This structural alignment suggests that the architecture of RAG is fundamentally converging with agent workflows, solidifying its position as a key facilitator of intelligent and autonomous systems. 
+ +Although the structural alignment between RAG and agent workflows highlights their deepening convergence, a critical next step in enhancing these intelligent systems lies in personalization. Personalization is a key driver toward achieving more adaptive and context-aware AI, which is fundamental for the progression toward Artificial General Intelligence (AGI). It plays an essential role in applications such as personalized reasoning [39, 149], adaptive decision-making [72], user-specific content generation [109, 151], and interactive AI systems [73, 92]. However, existing research lacks a comprehensive comparative analysis of personalized RAG and agentic approaches. Current surveys primarily focus on general RAG methodologies [32, 35] or agent-related literature [63, 131, 167], without systematically exploring their implications for personalization. While recent works such as [68, 168] discuss personalization, they predominantly address personalized generation within LLMs or specific downstream tasks, overlooking how personalization can be effectively integrated into RAG and agent workflows. + +Motivated by the above issues, this survey aims to provide a comprehensive review of the integration of personalization into RAG and agentic RAG frameworks to enhance user experiences and optimize satisfaction. The key contributions of this work can be summarized as follows: + +- We provide an extensive exploration of the existing literature on how personalization is integrated into various stages of RAG (pre-retrieval, retrieval, and generation) and agentic RAG (understanding, planning, execution, and generation). +- We summarize the key datasets, benchmarks, and evaluation metrics used in existing research for each subtask to facilitate future studies in the respective domains. + +![](images/eddf6ee96c6208637db90f8b20f4af142d11c804484f15c3dc8e58752b900628.jpg) +Fig. 1. Correlation between personalization and RAG with agent flow. + +- We also highlight the limitations of current research and suggest future directions for personalized RAG, emphasizing potential advancements to address existing challenges. + +The outline of this survey is as follows: we introduce what is personalization (Sec. 2) and explain how personalization is adopted into RAG pipeline (Sec. 3). Then, we present a literature review on where to integrate personalization within different stages of RAG and agentic RAG workflows (Sec. 4) and discuss the key datasets and evaluation metrics used in existing research (Sec.5). Lastly, we present a discussion on the limitations of current research and future directions (Sec. 6). + +# 2 WHAT IS PERSONALIZATION + +Personalization in current research refers to the tailoring of model predictions or generated content to align with an individual's preferences. In the context of RAG and agents, personalization involves incorporating user-specific information at various stages of the RAG pipeline or within agents. User personalization can be categorized into the following types: + +- Explicit User Profile: Explicitly presented user information, including biographical details, attributes (e.g., age, location, gender, education), and social connections (e.g., social networks). +- User Historical Interactions: Behavioral data, including browsing history, clicks, and purchases, which help infer user interests and preferences to improve personalization. + +Table 1. Overview of Personalized RAG and Agent. + +
| Field | Sub-field | Subsub-field | Papers |
| --- | --- | --- | --- |
| Pre-retrieval | Query Rewriting | Learning to Personalized Query Rewrite | CLE-QR [60], CGF [38], PEARL [80] |
| | | LLM to Personalized Query Rewrite | Least-to-Most Prompting [173], ERAGent [112], CoPS [174], Agent4Ranking [61], FIG [22], BASES [99] |
| | Query Expansion | Tagging-based query expansion | Gossiple [10], Biancalana and Micarelli [12], SoQuES [15], Zhou et al. [172] |
| | | Else | Lin and Huang [66], Bender et al. [9], Axiomatic PQEC [79], WE-LM [144], PSQE [14], PQEWC [7] |
| | Others | | Bobo [33], Kannadasan and Aslanyan [52], PSQE [8] |
| Retrieval | Indexing | | PEARL [80], KG-Retriever [21], EMG-RAG [137], PGraphRAG [5] |
| | Retrieval | Dense Retrieval | MeMemo [138], RECAP [71], LAPDOG [43], Gu et al. [37], PersonalLM [77], UIA [155], XPERT [125], DPSR [157], RTM [11], Pearl [80], MemPrompt [74], EERRA [23], MALP [160], USER-LLM [84], PER-PCS [120] |
| | | Sparse Retrieval | OPPU [121], PAG [101], Au et al. [5], UniMS-RAG [128], Deng et al. [29] |
| | | Prompt-based Retrieval | LAPS [50], UniIMP [140], Shen et al. [111] |
| | | Others | Salemi et al. [103], PersonalTM [65], Zhang et al. [165] |
| | Post-retrieval | | PersonaRAG [156], Pavliukevich et al. [89], UniMS-RAG [128], Salemi and Zamani [106], Zhang et al. [164], AutoCompressors [24], FIT-RAG [76] |
| Generation | Generation from Explicit Preferences | Direct Prompting | P² [49], Character Profiling [154], OpinionQA [107], Kang et al. [51], Liu et al. [67], Cue-CoT [129], TICL [26] |
| | | Profile-Augmented Prompting | GPG [158], Richardson et al. [101], ONCE [70], LLMTreeRec [163], KAR [145], Matryoshka [58] |
| | | Personalized-Prompt Prompting | Li et al. [57], RecGPT [166], PEPLER-D [59], GRAPA [94], SGPT [28], PFCL [152] |
| | Generation from Implicit Preferences | Fine-tuning-Based Methods | PLoRA [165], LM-P [142], MiLP [165], OPPU [122], PER-PCS [120], Review-LLM [91], UserIdentifier [78], UserAdapter [171], HYDRA [175], PocketLLM [90], CoGenesis [161] |
| | | Reinforcement Learning-Based Methods | P-RLHF [62], P-SOUPS [47], PAD [20], REST-PG [104], Salemi et al. [103], RewrimerSIRI [57], Kulkarni et al. [54] |
| From RAG to Agent | Personalized Understanding | In user-profile understanding | Xu et al. [148], Abbasian et al. [2] |
| | | In agent's role understanding | RoleLLM [139], Character-LLM [110], Wang et al. [134] |
| | | In agent's user-role joint understanding | SocialBench [18], Dai et al. [27], Ran et al. [96], Wang et al. [126], Tu et al. [123], Neeko [153] |
| | Personalized Planning and Execution | Memory Management | EMG-RAG [137], Park et al. [87], Abbasian et al. [2], RecAgent [133], TravelPlanner+ [114], PersonalWAB [17], VOYAGER [127], MemoryLLM [136] |
| | | Tool and API Calling | VOYAGER [127], Zhang et al. [159], PUMA [17], Wang et al. [126], PenetrativeAI [148], Huang et al. [44], Park et al. [87], MetaGPT [40], OKR-Agent [169] |
| | Personalized Generation | Alignment with User Fact | Character-LLM [110], Wang et al. [135], Dai et al. [27] |
| | | Alignment with User Preferences | Wang et al. [139], Ran et al. [96], Wang et al. [134], Chen et al. [18] |
+ +- User Historical Content: Implicit personalization derived from user-generated content, such as chat history, emails, reviews, and social media interactions. +- Persona-Based User Simulation: The use of LLM-based agents to simulate and generate personalized interactions. + +Integrating this personalized information at various stages of the RAG and agent workflows enables dynamic alignment with human preferences, thereby making responses more user-centric and adaptive. + +# 3 HOW TO ADOPT PERSONALIZATION + +We define the process of introducing personalization within the RAG pipeline as follows: + +$$ +g = \mathcal{G}(\mathcal{R}(Q(q, p), C, p), \text{prompt}, p, \theta) \tag{1} +$$ + +where $p$ denotes personalized information, and the process unfolds in three steps. In the pre-retrieval phase, query processing $(Q)$ refines the query $q$ using personalized information, such as through query rewriting or expansion. During the retrieval phase, the retriever $(\mathcal{R})$ leverages $p$ to fetch relevant documents from the corpus $(C)$. Finally, in the generation phase, the retrieved information, combined with $p$ and structured using the given prompt, is fed into the generator $(\mathcal{G})$ with parameters $\theta$ to produce the final response $g$. It is evident that personalized information directly influences multiple stages of the RAG pipeline. In this survey, we consider the agent system as a specialized application of the RAG framework, where personalization is incorporated in a similar manner. + +![](images/8f9e5a0db9555c1a4ed96186bb98667b2c4fd2abae37751b9dee8a95b60f65c1.jpg) +Fig. 2. Overview of the personalized pre-retrieval stage. + +# 4 WHERE TO ADOPT PERSONALIZATION + +# 4.1 Pre-retrieval + +4.1.1 Definition. Pre-retrieval is a crucial step in information retrieval systems, where the original user query is enhanced or modified before the retrieval process to improve the relevance and quality of the search results, as shown in Figure 2. This process often incorporates additional contextual or personalized information to better align the query with the user's intent. The process can be formalized as follows: + +$$ +q^{*} = Q(q, p) \tag{2} +$$ + +where $p$ and $q$ denote the personalized information and the original query, respectively, and $q^{*}$ is the optimized query after query reformulation. + +4.1.2 Query Rewriting. Query rewriting in RAG at the pre-retrieval stage refers to the process of reformulating user queries to enhance retrieval effectiveness by improving relevance, disambiguating intent, or incorporating contextual information before retrieving documents from an external knowledge source. The literature on personalized query rewriting can be broadly classified into two primary categories: (1) Direct Personalized Query Rewriting and (2) Auxiliary Personalized Query Rewriting. + +(1). Direct Personalized Query Rewriting. The first category focuses on personalized query rewriting by using direct models. For example, Cho et al. [25] present a personalized search-based query rewrite system for conversational AI that addresses user-specific semantic and phonetic errors. Nguyen et al. [82] apply reinforcement learning techniques to improve query rewriting in online e-commerce systems, leveraging distilled LLMs for personalized performance. CLE-QR [60] explores query rewriting in Taobao's search engine to enhance user satisfaction through customized query adaptation.
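To ground Eq. (2) and the direct rewriters discussed above, the following is a minimal sketch of a prompt-based personalized query rewriter: the personalized signal $p$ is folded into a prompt and an instruction-following LLM produces $q^{*}$. The `UserProfile` fields, the prompt template, and the stubbed `llm` callable are illustrative assumptions rather than the interface of any specific system surveyed here.

```python
from dataclasses import dataclass, field
from typing import Callable, Dict, List

@dataclass
class UserProfile:
    """Illustrative container for the personalized signal p in Eq. (2)."""
    attributes: Dict[str, str] = field(default_factory=dict)   # e.g. {"location": "Kowloon"}
    recent_queries: List[str] = field(default_factory=list)    # short interaction history

REWRITE_TEMPLATE = """You rewrite search queries for a specific user.
User attributes: {attributes}
Recent queries: {recent}
Original query: "{query}"
Return a single rewritten query that preserves the user's intent and resolves ambiguity."""

def personalized_rewrite(query: str, profile: UserProfile,
                         llm: Callable[[str], str]) -> str:
    """q* = Q(q, p): build a profile-conditioned prompt and let the LLM rewrite q."""
    prompt = REWRITE_TEMPLATE.format(
        attributes=profile.attributes,
        recent="; ".join(profile.recent_queries[-5:]),  # only the most recent context
        query=query,
    )
    return llm(prompt).strip()

if __name__ == "__main__":
    # Stub LLM so the sketch runs without any external service.
    stub_llm = lambda prompt: "apple macbook air battery replacement near Kowloon"
    p = UserProfile(attributes={"location": "Kowloon"},
                    recent_queries=["macbook air m2 review", "laptop battery lifespan"])
    print(personalized_rewrite("apple battery", p, stub_llm))
```

Auxiliary approaches, discussed next, differ mainly in how the profile passed to the prompt is produced, for example retrieved from external memory or distilled through multi-step reasoning rather than stored directly.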
CGF [38] introduces a constrained generation framework that allows for more flexible and personalized query rewriting in conversational AI. Li et al. [57] investigate learning methods to rewrite prompts for personalized + +text generation, improving the relevance and engagement of AI-generated content. Additionally, PEARL [80] discusses personalizing large language model-based writing assistants through the integration of generation-calibrated retrievers, enhancing AI-generated content. + +(2). Auxiliary Personalized Query Rewriting. The second category emphasizes personalized query rewriting by using auxiliary mechanisms, such as retrieval, reasoning strategies, and external memory. Zhou et al. [173] propose a least-to-most prompting strategy that aids in complex reasoning within LLMs, which can be adapted for personalized text generation. ERAGent [112] enhances retrieval-augmented LLMs to improve personalization, efficiency, and accuracy, indirectly supporting personalized query rewriting for content generation. CoPS [174] integrates LLMs with memory mechanisms to create more personalized search experiences, which also influences content generation through better query understanding. Further, Agent4Ranking [61] employs multi-agent LLMs to perform semantically robust ranking, including personalized query rewriting to improve search rankings. FIG [22] combines graph-based methods with LLMs for query rewriting, improving personalized content generation and conversational interactions. Lastly, BASES [99] employs LLM-based agents to simulate large-scale web search user interactions, contributing to the development of personalized query rewriting strategies for content generation. + +4.1.3 Query Expansion. Query expansion enhances retrieval systems by expanding a user's original query with additional terms, synonyms, or refined structure to better capture intent. This improves the relevance and scope of retrieved documents. Recent advancements in LLMs have reinvigorated this field [46, 48, 132], leveraging their comprehension and generation abilities to expand queries using encoded knowledge or external retrieval, with notable success. Personalized query expansion, a subset, incorporates user-specific data to tailor results, boosting performance and customizing the search experience. + +(1). Tagging-based Query Expansion. By 2009, studies began incorporating tagging information to enhance personalized query expansion. For instance, Gossple [10] introduced the TagMap and TagRank algorithms, which dynamically selected tags from personalized networks constructed using the cosine similarity of user-item tag distances, improving recall performance. Similarly, Biancalana and Micarelli [12] recorded user queries and visited URLs, leveraging social bookmarking to extract relevant tags and build a personalized three-dimensional co-occurrence matrix. Based on this, multiple semantically categorized expanded queries were generated to better reflect user interests. Further advancements include SoQuES [15], which integrated tag semantic similarity with social proximity, and a graph-based approach [172] that utilized Tag-Topic models and pseudo-relevance feedback for term weighting, tailoring the expansion process to individual user preferences. +(2). Other Approaches. Apart from tagging-based techniques, early research on personalized query expansion primarily focused on modeling user personalization based on search history [66], social networks, or preferences derived from friendship networks [9].
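The tagging-, history-, and network-based expansion techniques above share a common skeleton: candidate terms are scored by mixing a global relevance signal with user-specific term weights, and the top-scoring terms are appended to the query. Below is a minimal, self-contained sketch of that skeleton; the linear score mixture, the toy relevance values, and the whitespace tokenization are illustrative assumptions, not the exact formulation of any system cited in this subsection.

```python
from collections import Counter
from typing import Dict, List

def profile_term_weights(user_history: List[str]) -> Dict[str, float]:
    """Tiny stand-in for a user profile: normalized term frequencies
    over the user's past queries, tags, or bookmarks."""
    counts = Counter(tok for text in user_history for tok in text.lower().split())
    total = sum(counts.values()) or 1
    return {term: c / total for term, c in counts.items()}

def personalized_expand(query: str,
                        candidates: Dict[str, float],
                        profile: Dict[str, float],
                        alpha: float = 0.5,
                        k: int = 2) -> str:
    """Append the top-k candidate terms, scored as a mix of a global
    relevance score and the user-profile weight (alpha balances the two)."""
    query_terms = set(query.lower().split())
    scored = {
        term: alpha * rel + (1 - alpha) * profile.get(term, 0.0)
        for term, rel in candidates.items()
        if term not in query_terms
    }
    top = sorted(scored, key=scored.get, reverse=True)[:k]
    return " ".join([query, *top])

if __name__ == "__main__":
    history = ["python pandas dataframe merge", "pandas groupby tutorial"]
    # Candidate terms with made-up global relevance scores, e.g. from
    # pseudo-relevance feedback or co-occurrence statistics.
    candidates = {"snake": 0.5, "pandas": 0.6, "dataframe": 0.5, "zoo": 0.3}
    print(personalized_expand("python merge", candidates,
                              profile_term_weights(history)))
    # Expands to "python merge pandas dataframe" for this user,
    # rather than the generic "snake"/"zoo" terms.
```

The systems discussed next refine how these two signals are obtained and combined, from axiomatic rules to embedding- and clustering-based weighting.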
The Axiomatic PQEC framework [79] formalized expansion rules using both local (user behavior-driven) and social (network-driven) strategies. In 2017, WE-LM [144] advanced this paradigm by modeling multi-relational networks with word embeddings across tag-word relationships, refining associations through affinity graphs. Later, PSQE [14] further improved tagging-based methods using utf-iuf user profiling, integrating a tag similarity graph with user profiles in the online phase to compute expansion terms relevant to user interests in real-time, achieving dynamic personalized expansion. In addition, PQEWC [7] leveraged clustering and contextual word embeddings to optimize query expansions dynamically. + +![](images/431fadc2fb6b837e6448de02affe85f7a85af23cf964b4cebff641487aab910d.jpg) +Fig. 3. Overview of the personalized retrieval stage. + +4.1.4 Others. Besides query rewriting and query expansion, other personalized query-related research focuses on areas like query disambiguation and query auto-completion [116]. Bobo [33] allows users to input contextual terms reflecting their domain knowledge. In 2019, a method [52] applied fastText embeddings from recent queries to rank candidates. In addition, PSQE [8] employed synthetic user profiles from Wikipedia and word2vec embeddings for query disambiguation. + +4.1.5 Discussion. While both query rewriting and query expansion aim to align user input with system understanding to enhance retrieval quality, their roles in personalization differ in fundamental ways. Understanding the distinct operational characteristics and application scenarios of each technique is essential for designing effective personalized retrieval systems. The key takeaways are listed as follows: + +- Query rewriting is most beneficial when the original query is ambiguous, underspecified, or misaligned with retrieval intents, particularly in conversational or multi-turn settings. +- Query expansion is most effective when the original query is relevant but incomplete - i.e., when it needs to be semantically broadened to cover additional relevant concepts. + +# 4.2 Retrieval + +4.2.1 Definition. The retrieval process involves finding the most relevant documents $D^{*}$ from a corpus $C$ based on a query $q^{*}$ , as shown in Figure 3. To incorporate personalization, additional user-specific information $p$ is integrated into the retrieval function $\mathcal{R}$ . This allows the retrieval process to tailor the selected documents to align with individual user preferences or contexts, thereby enhancing the relevance and personalization of the generated outputs. + +$$ +D ^ {*} = \mathcal {R} (q ^ {*}, C, p) \tag {3} +$$ + +In the retrieval process, personalization can primarily be introduced by focusing on three steps: indexing, retrieval, and post-retrieval. These steps ensure efficient and accurate retrieval of relevant documents or knowledge, while tailoring the process to individual user preferences. Below, we provide a detailed explanation of each step. + +4.2.2 Indexing. Indexing organizes knowledge base data into a structured format to facilitate efficient retrieval. Within the RAG pipeline, documents are either chunked or entirely encoded into representations before being integrated into searchable systems [30, 117]. Conventional encoding methods employ either sparse encoding techniques (e.g., + +TF-IDF [95], BM25 [102]) or dense encoding approaches leveraging pre-trained models, such as BERT [1], Siamese Encoders [98], or LLM-based encoders [64, 147]. 
+ +To introduce personalization at the indexing stage, PEARL [80] generates user embeddings by encoding personal history data with models like DeBERTa. These embeddings are subsequently clustered to create personalized shared indices. Other approaches integrate knowledge graphs into indexing to enhance retrieval performance. For example, KG-R retriever [21] employs a Hierarchical Index Graph, consisting of a knowledge graph layer and a collaborative document layer, to improve RAG retrieval. EMG-RAG [137] incorporates personalized memory within an editable knowledge graph, enabling dynamic retrieval. Similarly, PGraphRAG [5] leverages user-centric knowledge graphs to enhance personalization in retrieval tasks. + +4.2.3 Retrieval. The Retrieval step matches a user query with the indexed knowledge base to fetch relevant candidates. It can be broadly categorized into four different types: (1) Dense Retrieval, (2) Sparse Retrieval, (3) Prompt-based Retrieval, and (4) Others. + +(1). Dense Retrieval. Dense retrieval methods often use vector embeddings and similarity metrics (e.g., cosine similarity) and achieve personalization by encoding user preferences, context, or interactions into query or document embeddings, enabling tailored results through similarity-based matching. For instance, MeMemo [138] retrieves personalized information by matching user-specific embeddings with document vectors, focusing on private, on-device text generation. Similarly, RECAP [71] and LAPDOG [43] enhance personalized dialogue generation by encoding queries and user profiles as dense vectors and retrieving top-N results, ensuring user-specific context drives the responses. In chatbots, Gu et al. [37] integrates conversational context and user profiles to align retrieved responses with user personas. PersonalM [77] employs group-wise contrastive learning, training its retrieval model to align user queries with domain-specific text fragments, thereby improving personalization. UIA [155] employs dual encoders to retrieve documents tailored to user preferences. XPERT [125] incorporates temporal events and user interactions into embeddings, enabling large-scale retrieval across millions of items. + +Dense retrieval also enhances specific applications like e-commerce, medical assistance, and language models. DPSR [157] and RTM [11] encode user queries and product information to personalize product searches dynamically. Pearl [80] and MemPrompt [74] retrieve personalized content by leveraging historical user data and memory-assisted mechanisms. EERRA [23] uses review embeddings as dense queries for recommendations. In medical assistance, MALP [160] and User-LLM [84] integrate short- and long-term user interactions into embeddings for contextualized, personalized responses. Finally, PER-PCS [120] retrieves relevant information using individual user histories, enhancing the personalization capabilities of large language models. + +(2). Sparse Retrieval. Sparse retrieval methods often rely on term-based matching (e.g., BM25) and apply personalization by assigning higher weights to terms or keywords that are more relevant to the user. OPPU [121] uses the BM25 algorithm to select the k most relevant records from the user's historical data for the current query. Similarly, PAG [101] incorporates user input and profiles to enhance summarization and retrieval, aligning sparse representations with personalization objectives for large language models. Au et al. 
[5] uses BM25 search algorithms to find entries related to the target user or neighboring users through the graph structure. UniMS-RAG [128] combines sparse and dense retrieval by leveraging multi-source knowledge, such as dialogue context and user images, to refine personalized responses in dialogue systems. Lastly, Deng et al. [29] apply sparse retrieval to support fact-based queries, considering user queries and preferences to enhance answer generation for e-commerce applications. + +(3). Prompt-based Retrieval. Prompt-based retrieval leverages prompts to guide retrieval from the model or external sources and introduces personalization by crafting user-specific prompts that guide the retrieval process. These prompts may include explicit user preferences, historical interactions, or detailed instructions that reflect the user's unique requirements. By embedding this personalized context directly into the prompt, the retrieval process can dynamically adjust to capture and return results that are most relevant to the user. LAPS [50] focuses on multi-session conversational search by storing user preferences and dialogue context, then using prompts to retrieve relevant information tailored to the user's biases and categories of interest. UniMP [140] employs user interaction histories as input to prompt-based retrieval, enabling personalized recommendations for multi-modal tasks, such as vision-language applications, by aligning prompts with user behavioral data. In contrast, Shen et al. [111] explores the use of LLMs to extract empathy and narrative styles from user-provided stories, but this work primarily focuses on style extraction and does not explicitly involve a retrieval component. +(4). Others. Reinforcement learning-based retrieval personalizes the process by optimizing retrieval policies based on user feedback, learning user preferences over time to adjust strategies. Salemi et al. [103] combines models like BM25, RbR, and dense retrieval, refining them with reinforcement learning (RL) and knowledge distillation (KD) to adapt to user profiles for personalized outputs. Parameter-based retrieval leverages pre-trained model parameters to implicitly store and retrieve user-specific information, allowing direct retrieval from the model without traditional indices. PersonalTM [65] generates document identifiers (Document IDs) using a Transformer model, encoding query, history, and document relationships into its parameters for personalization. Similarly, Zhang et al. [165] uses parameterized representations to integrate user queries and histories, tailoring responses to individual preferences. + +4.2.4 Post-retrieval. Current Post-Retrieval methods primarily focus on refining retrieved documents or responses to improve relevance and coherence, current methodologies could be categorized into three parts (1) Re-ranking, (2) Summarization, and (3) Compression. + +(1). Re-ranking. Re-ranking enhances personalized content generation by prioritizing more relevant documents at the top. PersonaRAG [156] extends RAG by integrating user-centric agents, such as the Live Session Agent and the Document Ranking Agent, to refine document ranking and improve overall performance. Pavliukevich et al. [89] propose a cross-encoder BERT model for re-ranking external knowledge within a personalized context. UniMS-RAG [128] introduces a scoring mechanism that evaluates retrieved documents and outputs by optimizing the retriever. 
Besides, it includes an evidence attention mask, enabling re-ranking during inference and applying it to personalized datasets. Salemi and Zamani [106] present an iterative approach to optimizing ranking results based on the expectation-maximization algorithm, with performance validated in personalized scenarios. +(2). Summarization. Summarization refers to the process of summarizing retrieved information to enhance performance. For instance, Zhang et al. [164] introduced a role-playing agent system to summarize retrieved history in order to improve the final Personalized Opinion Summarization process. +(3). Compression. Compression involves condensing embeddings or retrieved content to enhance efficiency and effectiveness. Approaches like AutoCompressor [24] compress contextual embeddings into shorter semantic representations, and FIT-RAG [76] introduces a self-knowledge recognizer along with a sub-document-level token reduction mechanism to minimize tokens within RAG pipeline. However, few studies have specifically explored personalized fields, highlighting a promising direction for future research. + +4.2.5 Discussion. Indexing, retrieval, and post-retrieval methods each play a critical role in ensuring efficient and personalized information processing, with specific applications and trade-offs. Indexing focuses on organizing knowledge bases for efficient retrieval, using techniques such as sparse encoding methods like TF-IDF and BM25, which are efficient but limited in understanding semantics, and dense encoding methods like BERT and DeBERTa, which provide better semantic understanding but require significant computational resources. These methods are widely used in tasks like question answering and personalized recommendation systems. Retrieval involves matching user queries with relevant documents and can be categorized into dense retrieval, which provides high semantic understanding and personalization but is computationally expensive; sparse retrieval, which is efficient and interpretable but less capable of handling semantics; prompt-based retrieval, which is highly flexible and adaptable to user needs but requires careful engineering of prompts; and advanced methods like reinforcement learning-based approaches, which dynamically adapt to user feedback but are complex to implement. This step is essential in applications like personalized dialogue systems, search engines, and e-commerce. Post-retrieval methods refine retrieved results to enhance relevance and coherence through re-ranking, which improves personalization and prioritizes relevant content but increases computational overhead; summarization, which simplifies complex information for better user understanding but risks losing critical details; and compression, which reduces computational costs by condensing information but remains underexplored in personalized contexts. Together, these methods provide a comprehensive pipeline for delivering efficient, relevant, and personalized outputs, balancing their strengths in semantic understanding, relevance, and flexibility with challenges related to computational costs and implementation complexity. + +# 4.3 Generation + +4.3.1 Definition. Personalized generation incorporates user-specific retrieved documents $D^{*}$ , task-specific prompt prompt, and user preference information $p$ via the generator $\mathcal{G}$ parameterized by $\theta$ to produce tailored content $g^{*}$ aligned with individual preference, where the flow is shown in Figure 4. 
The generation process can be formulated as + +$$ +g^{*} = \mathcal{G}(D^{*}, \text{prompt}, p, \theta). \tag{4} +$$ + +Personalized generation can be achieved by incorporating explicit and implicit preferences. Explicit preference-driven methodologies utilize direct input signals (e.g., $D^{*}$, prompt, and $p$) to tailor outputs to specific user preferences. Conversely, implicit preference-encoded approaches embed personalized information within the parameters $\theta$ of the generator model during training, thereby facilitating preference alignment without the necessity for explicit runtime inputs. + +4.3.2 Generation from Explicit Preferences. Integrating explicit preferences into LLMs facilitates personalized content generation. Explicit preference information encompasses user demographic information (e.g., age, occupation, gender, location), user behavior sequences (reflecting historical behavioral patterns), and user historical output texts (capturing writing style and tone preferences). The injection of explicit preferences for personalized generation can be categorized into three types: (1) Direct-integrated Prompting, (2) Summary-augmented Prompting, and (3) Adaptive Prompting. + +(1). Direct-integrated Prompting. Integrating explicit user preferences into language models through prompting enables the prediction of users' intent and behavioral patterns, facilitating personalized content generation. For instance, $\mathrm{P}^2$ [49], Character Profiling [154], and OpinionQA [107] integrate personalized data into LLMs through prompting for role-playing tasks, thereby aligning the model's responses with specified user profiles. Kang et al. [51] and Liu et al. [67] + +![](images/684289db43c065be977860260a5ca5599fa5f676c9663e41befff2d9bcc9c089.jpg) +Fig. 4. Overview of the personalized generation stage. +
For example, Li et al. [57] trains a personalized prompt rewriter via supervised and reinforcement learning. RecGPT [166] and PEPLER-D [59] leverage prompt tuning to generate personalized prompts, enhancing sequential and explainable recommendations, respectively. GRAPA [94] integrates semantic and collaborative signals from user-item interaction graphs with graph neural networks to generate context-aware personalized prompts. SGPT [28] employs prompt tuning to jointly model common and group-specific patterns, bridging generalized and personalized federated learning paradigms. Furthermore, PFCL [152] achieves multi-granularity human preference modeling: coarse-grained prompts distill shared knowledge, while fine-grained prompts adapt to individual user characteristics. + +4.3.3 Generation from Implicit Preferences. Unlike explicit preference modeling, which captures user preferences through textual input, implicit preference-based methods incorporate personalization through internal parameters. This personalization is achieved either through Parameter-Efficient Fine-tuning (PEFT) techniques, such as LoRA [42], + +or reinforcement learning-based approaches for preference alignment [20, 57]. Based on these strategies, we classify existing methods into two categories: (1) Fine-tuning-Based Methods and (2) Reinforcement Learning-Based Methods. + +(1). Fine-tuning Based Methods. For fine-tuning methods, LoRA is the most widely adopted since it is resource-efficient and enables rapid adaptation without compromising model performance. PLoRA [165] introduces a personalized knowledge integration framework that combines task-specific LoRA with user-specific knowledge. Similarly, LM-P [142] personalizes information via LoRA by incorporating User ID as a personalization factor. MiLP [165] employs Bayesian optimization to determine the optimal personalization injection configuration, including LoRA settings, to effectively capture and utilize user-specific information. OPPU [122] and PER-PCS [120] follow a similar approach, leveraging user history data for fine-tuning LoRA-based personalization. However, PER-PCS differs by incorporating a gating module that selects the appropriate LoRA, enabling fine-grained personalization. Additionally, Review-LLM [91] integrates LoRA for supervised fine-tuning in the task of personalized review generation. + +Beyond LoRA-based approaches, alternative pipelines have been proposed for personalized generation. UserIdentifier [78] introduces a user-specific identifier, significantly reducing training costs while enhancing personalized demonstration. UserAdapter [171] proposes user-independent prefix embeddings, leveraging prefix tuning for personalization. Meanwhile, HYDRA [175] achieves implicit personalization by training user-specific headers. Recently, researchers have also explored fine-tuning personalized model on edge devices [90] and collaborative learning between small and large language models to enable more personalized generation [161]. + +(2). Reinforcement Learning Based Methods. Apart from fine-tuning based methods, recent research has explored reinforcement learning based techniques to personalize text generation by aligning outputs with user preferences. P-RLHF [62] has been proposed to jointly learn a user-specific and reward model to enable text generation that aligns with a user's styles or criteria. 
P-SOUPS [47] models multiple user preferences as a Multi-Objective Reinforcement Learning (MORL) problem, decomposing preferences into multiple dimensions, each trained independently. PAD [20] aligns text generation with human preferences during inference by utilizing token-level personalized rewards to guide the decoding process. REST-PG [104] introduces a framework that trains large language models to reason over personal data during response generation. This approach first generates reasoning paths to enhance the LLM's reasoning ability and then employs Expectation-Maximization Reinforced Self-Training to iteratively refine the model based on its high-reward outputs. Additionally, Salemi et al. [103] incorporate reinforcement learning into the RAG pipeline to improve retrieval accuracy, thereby enhancing the personalization of generated content. Other applications include RewriterSIRI [57], which has been introduced to generate text via RL-based personalized prompt rewriting using API-based LLMs, and Kulkarni et al. [54], who explore the use of reinforcement learning to optimize RAG for improving the relevance and coherence of chatbot responses in specialized domains, ultimately enhancing user satisfaction and engagement. + +4.3.4 Discussion. Personalized generation can be adopted via both explicit and implicit preference injection, yet they exhibit distinct characteristics that make them suitable for different scenarios. In explicit preference-based generation, personalization is clearly defined through user profile descriptions, contextual information, and similar inputs, which are incorporated into generators via prompts. A key advantage of this approach is explainability, as the personalized information is explicitly provided and easily traceable. Despite leveraging provided preferences and internal knowledge, explicit preference injection's personalization is constrained by model capabilities and irrelevant information interference. In contrast, implicit preference-based generation internalizes personalized information into + +![](images/c1f227eab52914058e52cd6428ef57f5faa6d829581489e901866e44230f1223.jpg) +Fig. 5. Overview of transition from personalized RAG to personalized agent. + +the generator's parameters through scene-specific personalized data, thereby adapting the model for more fine-grained personalization. However, these methods typically incur substantial training and computational costs, as they require fine-tuning the generator's internal parameters. Therefore, selecting between these approaches should be guided by the specific application scenario and resource constraints. + +# 4.4 From RAG to Agent + +4.4.1 Definition. A personalized LLM-based agent is a system designed to dynamically incorporate user context, memory, and external tools or APIs to support highly personalized and goal-oriented interactions [19, 45, 146], and solve problems in a goal-oriented manner [63, 113]. From the previously introduced stages of RAG, we observe that the evolution of personalized RAG reveals a structural convergence with agent architectures. We analyze them from three key perspectives: + +- Personalized Understanding: This phase within the agent parallels the query understanding and rewriting process of RAG as outlined in Section 4.1. However, it extends beyond static semantic parsing by incorporating dynamic user profiling [139] and role modeling [110]. 
This integration enables the agent to dynamically align interactions with implicit user preferences, facilitating personalized responses and task-specific adaptations [96]. + +- Personalized Planning and Execution: This phase in agents mirrors RAG's retrieval process in Section 4.2 yet it advances beyond static document retrieval by incorporating real-time memory management [87] and sophisticated tool and API calling [127]. This approach ensures the dynamic alignment of external knowledge with personalized constraints, such as integrating medical history in healthcare agents [2], to deliver context-aware and user-specific outcomes. +- Personalized Generation: This phase in agents mirrors RAG's generative process in Section 4.3 but transcends static template-based generation by integrating user preference and fact alignment. Agents dynamically enforce user preferences and ensure fact consistency through role-specific mechanisms (e.g., social adaptability in conversational agents [2]), enabling outputs to evolve in harmony with personalized and situational constraints rather than relying solely on predefined generative frameworks. + +In general we frame agent architectures as "personalized RAG++", where persistent memory [137] replaces static indexes, and tool APIs [17] serve as dynamic knowledge connectors, enabling complicated, human-aligned interactions beyond one-shot retrieval, as shown in Figure 5. This progression highlights that as RAG systems incorporate deeper personalization—requiring user-state tracking, adaptive tool usage, and context-aware generation, they inherently adopt agent-like capabilities. + +4.4.2 Personalized Understanding. Personalized understanding refers to an agent's ability to accurately interpret user inputs by integrating user intent recognition and contextual analysis. This process ensures interactions that are both meaningful and contextually appropriate. The rationale behind this classification lies in its capacity to address three core aspects of understanding: recognizing user intent, analyzing context, and leveraging user profiles. Each of these aspects plays a distinct role in improving the agent's performance. + +(1). User-profile Understanding. In user-profile understanding, an agent's personalized ability primarily depends on its capacity to accurately model and understand the user's preferences, context, and intentions. Xu et al. [148] proposes a framework in which LLMs are designed to understand the physical world, thereby facilitating a deeper connection between the agent and its environment, which is essential for accurate task execution. Abbasian et al. [2] further expands this understanding by emphasizing the importance of personalization in health agents, where the user's profile directly influences the behavior and decisions of the agent. This user understanding is foundational to ensuring that the AI agent performs tasks in a way that aligns with individual user needs. +(2). Role Understanding. In agent's role understanding, the role of the agent within these environments is also crucial. Recent studies focus on enhancing role-playing capabilities within LLMs. Wang et al. [139] introduce RoleLLM, a benchmark that aims to elicit and refine the role-playing abilities of LLMs, demonstrating how role understanding influences agent performance in conversational tasks. Similarly, Shao et al. [110] present Character-LLM, a trainable agent framework for role-playing, which tailors its responses based on predefined roles. Wang et al. 
[134] introduce a method for evaluating personality fidelity in role-playing agents through psychological interviews, aiming to enhance the realism and consistency of AI-driven characters. This role understanding allows for more contextually appropriate interactions, increasing the relevance and utility of AI agents across various applications. +(3). User-role Joint Understanding. In agent's user-role joint understanding, the intersection of user and role understanding is explored through frameworks that evaluate and enhance the social and personality aspects of LLMs. SocialBench [18] provides a sociality evaluation framework for role-playing agents. Dai et al. [27] and + +Ran et al. [96] extend this by incorporating multi-modal data and personality-indicative information, respectively, which allows agents to better adapt to both user and role understanding in dynamic environments. Furthermore, Wang et al. [126] offer a perspective on how role and environment understanding can improve user experience. Tu et al. [123] contribute by providing a benchmark specifically for evaluating role-playing agents in the Chinese context, adding a cultural dimension to role understanding. Finally, Neeko [153] further advances role-based interactions. + +4.4.3 Personalized Planning and Execution. Personalized planning and execution refer to the process of designing and implementing strategies or actions that are specifically tailored to an individual's unique context and goals [44, 87, 114, 159]. It requires agents to dynamically integrate long-term memory, real-time reasoning, and external tool utilization [40, 41, 169], as demonstrated in healthcare decision support [2] and travel planning scenarios [17]. We analyze two fundamental components that enable this personalization in the following. + +(1). Memory Management. Effective memory systems allow agents to integrate users' historical preferences, behavioral patterns, and contextual habits, enhancing their ability to plan and tailor interactions to user-specific needs [17, 127, 136]. The EMG-RAG framework [137] combines editable memory graphs with retrieval-augmented generation to maintain dynamic user profiles, while Park et al. [87] implement memory streams and periodic reflection mechanisms to simulate human-like behavior. In healthcare applications, Abbasian et al. [2] integrate multimodal user data through specialized memory modules to optimize treatment recommendations. For recommendation systems, RecAgent [133] employs hierarchical memory structures to model user interaction patterns across multiple domains. Recent advances like TravelPlanner+ [114] demonstrate how memory-augmented LLMs achieve higher relevance in personalized itinerary generation compared to generic planners. +(2). Tool and API Calling. The integration of external tools expands agents' capabilities beyond pure linguistic reasoning, enabling agents to interact with users and perform personalized tasks [17, 126, 127, 148, 159]. For instance, VOYAGER [127] establishes a paradigm for lifelong skill acquisition through automatic API curriculum learning and skill library construction. In robotics, Zhang et al. [159] develop a bootstrapping framework where LLMs guide robots in tool-mediated skill discovery, enabling a high success rate in novel object manipulation tasks. The PUMA framework [17] demonstrates how personalized web agents can achieve performance gains in e-commerce tasks through adaptive API orchestration. For mobile interaction, Wang et al.
[126] implements few-shot tool learning to handle diverse UI operations with minimal training data. These approaches highlight the importance of tool grounding mechanisms [44] that translate linguistic plans into executable API sequences while maintaining personalization constraints. + +This synthesis highlights that modern agent systems achieve enhanced personalization through two primary strategies: 1) Memory-augmented architectures, which leverage editable memory graphs [137], reflection mechanisms [87], and hierarchical memory structures [133] to dynamically adapt to user preferences across various domains; and 2) Tool and API integration, which expand agent capabilities by balancing generalization with specialization. Future work may explore improving the contextual relevance and adaptability of memory systems while optimizing real-time tool interaction for seamless task execution. + +4.4.4 Personalized Generation. Based on the foundation of personalized planning and execution mechanisms, which enable agents to adapt strategies to user-specific contexts [44, 159], the next critical concern lies in personalized generation. This capability ensures that generated outputs not only align with factual correctness but also resonate with users' unique preferences, personality traits, and situational needs. Personalized generation bridges the gap between + +adaptive reasoning and human-aligned outcomes, allowing agents to produce contextually relevant and emotionally appropriate responses. + +(1). Alignment with User Fact. Alignment with User Fact emphasizes the accuracy, consistency, and factual grounding of personalized responses, ensuring they remain trustworthy across diverse user interactions. This is particularly challenging in personalized agents, where maintaining character authenticity while avoiding hallucinations requires balancing creativity with factual adherence. Recent advances address these challenges through improved training frameworks and evaluation metrics. For instance, Character-LLM [110] integrates memory-augmented architectures to reduce hallucinations while preserving character-specific traits. Wang et al. [135] investigate quantization effects on personality consistency in edge-deployed agents and stabilize outputs under computational constraints. Dai et al. [27] ensures multimodal consistency (text-image) in role-playing. These works highlight the importance of architectural innovations and rigorous evaluation in achieving reliability. +(2). Alignment with User Preferences. Alignment with user preferences ensures that generated outputs reflect individualized personalities, values, and interaction styles. This requires agents to dynamically interpret implicit user cues and adapt responses accordingly. Wang et al. [139] benchmarks role-specific alignment. Ran et al. [96] improves personality fidelity via psychological scale datasets. Wang et al. [134] quantifies alignment via psychological interviews. Chen et al. [18] evaluates social adaptability in conversations. + +4.4.5 Discussion. The architectural evolution from RAG to personalized agents introduces significant advancements in human-AI interaction but also surfaces critical challenges that warrant further investigation. + +Personalized Understanding, while enabling interpretation of user intent and context, faces limitations in real-time adaptability and generalization. 
Current approaches like RoleLLM [139] and Character-LLM [110] demonstrate robust role-specific comprehension but struggle with dynamic user state tracking, particularly when handling evolving preferences or multi-session interactions. Furthermore, cultural specificity in benchmarks like CharacterEval [123] reveals gaps in global applicability, as agents trained on region-specific data often fail to generalize across diverse sociocultural contexts. Future work could explore hybrid architectures that combine continuous learning mechanisms with privacy-preserving federated learning to address these adaptability constraints while maintaining user trust. + +Personalized Planning and Execution achieves remarkable task specialization through memory management and tool integration, yet suffers from scalability issues in complex environments. While frameworks like EMG-RAG [137] and VOYAGER [127] effectively manage user-specific constraints, their reliance on predefined API taxonomies limits emergent tool discovery in novel scenarios. The "cold-start" problem persists in domains requiring rapid skill acquisition, as seen in healthcare applications [2], where delayed API responses can compromise decision-making efficacy. A promising direction involves developing meta-reasoning architectures that dynamically prioritize memory recall versus tool invocation based on situational urgency and confidence thresholds. + +Personalized Generation balances factual accuracy with preference alignment but risks over-fitting, where excessive fine-tuning to user profiles may reinforce cognitive biases. Existing techniques address surface-level alignment but lack mechanisms for ethical boundary detection. For instance, agents might inadvertently propagate harmful stereotypes when mirroring user preferences without critical oversight. Future systems could integrate value-aligned reinforcement learning with human-in-the-loop validation to preserve authenticity while preventing detrimental customization. + +Table 2. Datasets and metrics for personalized RAG and Agent. +
| Field | Metrics Category | Metrics | Datasets |
| --- | --- | --- | --- |
| Pre-retrieval | Textual Quality | BLEU, ROUGE, EM | Avocado Research Email Collection [57, 85], Amazon review [57, 83], Reddit comments [57, 118], Amazon ESCI dataset [82, 97], PIP |
| | Information Retrieval | MAP, MRR, NDCG, Precision, Recall, RBP | AOL [88, 174], WARRIORS [99], Personalized Results Re-Ranking benchmark [6], delicio.us [9, 15, 144, 172], Flickr [9, 108], CiteULike [10, 14], LRDP [12], Delicious [141], Bibsonomy [79], Wikipedia [8, 33] |
| | Classification | Accuracy, Macro-F1 | SCAN [56, 173], AITA WORKSM [53, 80], Robust04 [61] |
| | Others | XEntropy, PMS, Image-Align, PQEC, ProfOverlap | Amazon ESCI dataset [82, 97], PIP, Bibsonomy [79] |
| Retrieval | Textual Quality | BLEU, ROUGE, Dis, PPL | TOPDIAL [130], Pchatbot [93], DuLemon [150] |
| | Information Retrieval | Recall, MRR, Precision, F1 | LiveChat [34], Pchatbot [93], DuLemon [150] |
| | Classification | Accuracy, Succ | TOPDIAL [130], PersonalityEvid [119], DuLemon [150], PersonalityEdit [75] |
| | Others | Fluency, Coherence, Plausibility, ES, DD, TPEI, PAE | PersonalityEvid [119], PersonalityEdit [75] |
| Generation | Textual Quality | BLEU, ROUGE, Dis, PPL, METEOR | LaMP [105], LongLaMP [55], DuLemon [150], PGraphRAG [5], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162] |
| | Classification | Accuracy, F1, Persona F1 | LaMP [105], LongLaMP [55], DuLemon [150], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162] |
| | Regression | MAE, RMSE | LaMP [105], LongLaMP [55], PGraphRAG [5] |
| | Others | Fluency, Mean Success Rate, Median Relative Improvements | Personalized-Gen [3] |
| Agent | Textual Quality | BLEU, ROUGE, METEOR, CIDEr, EM, Fluency, Coherence, Instruction Adherence, Consistency-related metrics | RICO [126], RoleBench [139], Shao et al. [110], SocialBench [18], MMRole-Data [27], ROLEPERSONALITY [96], ChatHaruhi [134], Character-LLM-Data [153], Knowledge Behind Persona [41], Wang et al. [137], Wang et al. [135], Zheng et al. [169] |
| | Information Retrieval | Recall, F1, Precision | Knowledge Behind Persona [41] |
| | Classification | Accuracy, Failure Rate, Classification Accuracy, Preference Rate, Correctness | MIT-BIH Arrhythmia Database [148], VirtualHome [44], SocialBench [18], ARC [100], AGIEval [100], HellaSwag [100], MedMCQA [100], AQUA-RAT [100], LogiQA [100], LSAT-AR [100], LSAT-LR [100], LSAT-RC [100], SAT-English [100], SAT-Math [100], PersonalWAB [17], TravelPlanner+ [114] |
| | Others | Pass@k, Executability, Productivity, Plausibility of the Story | Hong et al. [40], Zheng et al. [169] |
+ +# 5 EVALUATION AND DATASET + +In the evolving landscape of personalization, from RAG to advanced Agent-based systems, the evaluation of models relies heavily on diverse datasets and metrics tailored to specific tasks. This survey categorizes metrics into several key types: Textual Quality metrics (e.g., BLEU, ROUGE, METEOR) assess the fluency and coherence of generated outputs; Information Retrieval metrics (e.g., MAP, MRR, Recall) evaluate the accuracy and relevance of retrieved information; Classification metrics (e.g., Accuracy, F1) measure task-specific correctness; Regression metrics (e.g., MAE, RMSE) quantify prediction errors; and Other metrics (e.g., Fluency, Pass@k) address domain-specific or task-unique aspects like plausibility or executability. These metrics span pre-retrieval, retrieval, generation, and agent-based personalization approaches, reflecting their varied objectives. To provide a comprehensive overview, we compile an extensive list of datasets across these fields, as detailed in Table 2. These datasets, paired with their respective metrics, enable researchers to benchmark and refine personalized systems, from enhancing query rewriting to enabling autonomous agents in physical and virtual environments. + +# 6 CHALLENGES AND FUTURE DIRECTIONS + +Personalized RAG and agent-based systems still face several critical challenges that warrant further exploration. We list them as follows: + +- Balancing Personalization and Scalability: Integrating personalization data (such as preferences, history, and contextual signals) into RAG processes often increases computational complexity, making it difficult to maintain + +efficiency and scalability across large-scale systems. Future work could explore lightweight, adaptive embeddings and hybrid frameworks that seamlessly fuse user profiles with real-time contexts. + +- Evaluating Personalization Effectively: Current metrics like BLEU, ROUGE, and human evaluations fall short in capturing the nuanced alignment of outputs with dynamic user preferences, lacking tailored measures for personalization efficacy. Developing specialized benchmarks and metrics that assess long-term user satisfaction and adaptability is crucial for real-world applicability. +- Preserving Privacy through Device-Cloud Collaboration: Personalized retrieval often involves processing sensitive user data, raising privacy concerns, especially with the increased global emphasis on data protection regulations, such as the European Union's General Data Protection Regulation (GDPR). Consequently, a promising approach is the collaborative integration of on-device small Language models which handle sensitive personal data locally, with cloud-based LLM, which provides broader contextual knowledge. +- Personalized Agent Planning: Current research on agent planning remains mainly in its early stages, with much of the work focusing on building foundational frameworks such as GUI agents [81] and the application of agents across diverse domains [131]. Notably, the incorporation of personalized approaches has yet to be widely adopted. Exploring how to integrate personalized support into existing frameworks to enhance user experience represents a promising and valuable direction for future investigation. +- Ensuring Ethical and Coherent Systems: Bias in data processing, privacy concerns in user profiling, and coherence across retrieval and generation stages remain unresolved. 
Future directions should prioritize ethical safeguards, privacy-preserving techniques, and cross-stage optimization to build trustworthy, unified personalized systems. + +# 7 CONCLUSION + +In this paper, we explore the landscape of personalization from Retrieval-Augmented Generation (RAG) to advanced LLM-based Agents, detailing adaptations across pre-retrieval, retrieval, and generation stages while extending into agentic capabilities. By reviewing recent literature, datasets, and metrics, we highlight the progress and diversity in enhancing user satisfaction through tailored AI systems. However, challenges such as scalability, effective evaluation, and ethical concerns underscore the need for innovative solutions. Future research should focus on lightweight frameworks, specialized benchmarks, and privacy-preserving techniques to advance personalized AI. Relevant papers and resources are also compiled online for ease of future research. + +# REFERENCES + +[1] 2021. BERT: a review of applications in natural language processing and understanding. arXiv preprint arXiv:2103.11943 (2021). +[2] Mahyar Abbasian, Iman Azimi, Amir M Rahmani, and Ramesh Jain. 2023. Conversational health agents: A personalized llm-powered agent framework. arXiv preprint arXiv:2310.02374 (2023). +[3] Bashar Alhafni, Vivek Kulkarni, Dhruv Kumar, and Vipul Raheja. 2024. Personalized Text Generation with Fine-Grained Linguistic Control. In Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024). 88–101. +[4] Amazon. [n.d.]. Amazon Customer Review Dataset. Online dataset. https://nijianmo.github.io/amazon/ +[5] Steven Au, Cameron J Dimacali, Ojasmitha Pedirappagari, Namyong Park, Franck Dernoncourt, Yu Wang, Nikos Kanakaris, Hanieh Deilamsalehy, Ryan A Rossi, and Nesreen K Ahmed. 2025. Personalized Graph-Based Retrieval for Large Language Models. arXiv preprint arXiv:2501.02157 (2025). +[6] Elias Bassani, Pranav Kasela, Alessandro Raganato, and Gabriella Pasi. 2022. A multi-domain benchmark for personalized search evaluation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3822-3827. +[7] Elias Bassani, Nicola Tonellotto, and Gabriella Pasi. 2023. Personalized query expansion with contextual word embeddings. ACM Transactions on Information Systems 42, 2 (2023), 1-35. +[8] Oliver Baumann and Mirco Schoenfeld. 2024. PSQE: Personalized Semantic Query Expansion for user-centric query disambiguation. (2024). + +[9] Matthias Bender, Tom Crecelius, Mouna Kacimi, Sebastian Michel, Thomas Neumann, Josiane Xavier Parreira, Ralf Schenkel, and Gerhard Weikum. 2008. Exploiting social relations for query expansion and result ranking. In 2008 IEEE 24th International Conference on Data Engineering Workshop. IEEE, 501-506. +[10] Marin Bertier, Rachid Guerraoui, Vincent Leroy, and Anne-Marie Kermarrec. 2009. Toward personalized query expansion. In Proceedings of the Second ACM EuroSys Workshop on Social Network Systems. 7-12. +[11] Keping Bi, Qingyao Ai, and W Bruce Croft. 2021. Learning a fine-grained review-based transformer model for personalized product search. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 123-132. +[12] Claudio Biancalana and Alessandro Micarelli. 2009. Social tagging in query expansion: A new way for personalized web search. In 2009 International Conference on Computational Science and Engineering, Vol. 4. IEEE, 1060-1065. +[13] Microsoft Bing. [n.d]. 
Bing Search Engine. https://www.bing.com +[14] Mohamed Reda Bouadjenek, Hakim Hacid, and Mokrane Bouzeghoub. 2019. Personalized social query expansion using social annotations. Transactions on Large-Scale Data-and Knowledge-Centered Systems XL (2019), 1-25. +[15] Mohamed Reda Bouadjenek, Hakim Hacid, Mokrane Bouzeghoub, and Johann Daigremont. 2011. Personalized social query expansion using social bookmarking systems. In Proceedings of the 34th international ACM SIGIR conference on Research and development in Information Retrieval. 1113-1114. +[16] Domenico Bulfamante. 2023. Generative enterprise search with extensible knowledge base using ai. Ph.D. Dissertation. Politecnico di Torino. +[17] Hongru Cai, Yongqi Li, Wenjie Wang, ZHU Fengbin, Xiaoyu Shen, Wenjie Li, and Tat-Seng Chua. [n. d]. Large Language Models Empowered Personalized Web Agents. In THE WEB CONFERENCE 2025. +[18] Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. 2024. Socialbench: Sociality evaluation of role-playing conversational agents. arXiv preprint arXiv:2403.13679 (2024). +[19] Jiangjie Chen, Xintao Wang, Rui Xu, Siyu Yuan, Yikai Zhang, Wei Shi, Jian Xie, Shuang Li, Ruihan Yang, Tinghui Zhu, et al. 2024. From persona to personalization: A survey on role-playing language agents. arXiv preprint arXiv:2404.18231 (2024). +[20] Ruizhe Chen, Xiaotian Zhang, Meng Luo, Wenhao Chai, and Zuozhu Liu. 2024. Pad: Personalized alignment of llms at decoding-time. arXiv preprint arXiv:2410.04070 (2024). +[21] Weijie Chen, Ting Bai, Jinbo Su, Jian Luan, Wei Liu, and Chuan Shi. 2024. Kg-retriever: Efficient knowledge indexing for retrieval-augmented large language models. arXiv preprint arXiv:2412.05547 (2024). +[22] Zheng Chen, Ziyan Jiang, Fan Yang, Eunah Cho, Xing Fan, Xiaojiang Huang, Yanbin Lu, and Aram Galstyan. 2023. Graph meets LLM: A novel approach to collaborative filtering for robust conversational understanding. arXiv preprint arXiv:2305.14449 (2023). +[23] Hao Cheng, Shuo Wang, Wensheng Lu, Wei Zhang, Mingyang Zhou, Kezhong Lu, and Hao Liao. 2023. Explainable recommendation with personalized review retrieval and aspect learning. arXiv preprint arXiv:2306.12657 (2023). +[24] Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. 2023. Adapting language models to compress contexts. arXiv preprint arXiv:2305.14788 (2023). +[25] Eunah Cho, Ziyan Jiang, Jie Hao, Zheng Chen, Saurabh Gupta, Xing Fan, and Chenlei Guo. 2021. Personalized search-based query rewrite system for conversational ai. In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI. 179-188. +[26]Hyundong Cho, Karishma Sharma, Nicolaas Jedema, Leonardo FR Ribeiro, Alessandro Moschitti, Ravi Krishnan, and Jonathan May. 2025. TuningFree Personalized Alignment via Trial-Error-Explain In-Context Learning. arXiv preprint arXiv:2502.08972 (2025). +[27] Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. 2024. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203 (2024). +[28] Wenlong Deng, Christos Thrampoulidis, and Xiaoxiao Li. 2024. Unlocking the potential of prompt-tuning in bridging generalized and personalized federated learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 6087-6097. +[29] Yang Deng, Yaliang Li, Wenxuan Zhang, Bolin Ding, and Wai Lam. 2022. 
Toward personalized answer generation in e-commerce via multi-perspective preference modeling. ACM Transactions on Information Systems (TOIS) 40, 4 (2022), 1-28. +[30] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. 2024. The Faiss library. (2024). arXiv:2401.08281 [cs.LG] +[31] ESPN. [n.d.]. ESPN Sports Statistics Dataset. Online dataset. +[32] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A survey on rag meeting llms: Towards retrieval-augmented large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6491-6501. +[33] Byron J Gao, David C Anastasiu, and Xing Jiang. 2010. Utilizing user-input contextual terms for query disambiguation. In Coling 2010: Posters. 329-337. +[34] Jingsheng Gao, Yixin Lian, Ziyi Zhou, Yuzhuo Fu, and Baoyuan Wang. 2023. LiveChat: A large-scale personalized dialogue dataset automatically constructed from live streaming. arXiv preprint arXiv:2306.08401 (2023). +[35] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, Haofen Wang, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 2 (2023). +[36] Google. [n.d.]. Google Search. https://www.google.com + +[37] Jia-Chen Gu, Hui Liu, Zhen-Hua Ling, Quan Liu, Zhigang Chen, and Xiaodan Zhu. 2021. Partner matters! an empirical study on fusing personas for personalized response selection in retrieval-based chatbots. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 565-574. +[38] Jie Hao, Yang Liu, Xing Fan, Saurabh Gupta, Saleh Soltan, Rakesh Chada, Pradeep Natarajan, Chenlei Guo, and Gokhan Tur. 2022. CGF: Constrained generation framework for query rewriting in conversational AI. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track. 475-483. +[39] Nicola Henze, Peter Dolog, and Wolfgang Nejdl. 2004. Reasoning and ontologies for personalized e-learning in the semantic web. Journal of Educational Technology & Society 7, 4 (2004), 82-97. +[40] Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. 2023. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 3, 4 (2023), 6. +[41] WANG Hongru, Minda Hu, Yang Deng, Rui Wang, Fei Mi, Weichao Wang, Yasheng Wang, Wai-Chung Kwan, Irwin King, and Kam-Fai Wong. [n. d.]. Large Language Models as Source Planner for Personalized Knowledge-grounded Dialogues. In The 2023 Conference on Empirical Methods in Natural Language Processing. +[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. 2022. Lora: Low-rank adaptation of large language models. ICLR 1, 2 (2022), 3. +[43] Qiushi Huang, Shuai Fu, Xubo Liu, Wenwu Wang, Tom Ko, Yu Zhang, and Lilian Tang. 2024. Learning retrieval augmentation for personalized dialogue generation. arXiv preprint arXiv:2406.18847 (2024). +[44] Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. 2022. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International conference on machine learning. PMLR, 9118-9147.
+[45] Xu Huang, Weiwen Liu, Xiaolong Chen, Xingmei Wang, Hao Wang, Defu Lian, Yasheng Wang, Ruiming Tang, and Enhong Chen. 2024. Understanding the planning of LLM agents: A survey. arXiv preprint arXiv:2402.02716 (2024). +[46] Rolf Jagerman, Honglei Zhuang, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2023. Query expansion by prompting large language models. arXiv preprint arXiv:2305.03653 (2023). +[47] Joel Jang, Seungone Kim, Bill Yuchen Lin, Yizhong Wang, Jack Hessel, Luke Zettlemoyer, Hannaneh Hajishirzi, Yejin Choi, and Prithviraj Ammanabrolu. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023). +[48] Pengyue Jia, Yiding Liu, Xiangyu Zhao, Xiaopeng Li, Changying Hao, Shuaiqiang Wang, and Dawei Yin. 2024. MILL: Mutual Verification with Large Language Models for Zero-Shot Query Expansion. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 2498-2518. +[49] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2023. Evaluating and inducing personality in pre-trained language models. Advances in Neural Information Processing Systems 36 (2023), 10622-10643. +[50] Hideaki Joko, Shubham Chatterjee, Andrew Ramsay, Arjen P De Vries, Jeff Dalton, and Faegheh Hasibi. 2024. Doing personal laps: Llm-augmented dialogue construction for personalized multi-session conversational search. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 796-806. +[51] Wang-Cheng Kang, Jianmo Ni, Nikhil Mehta, Maheswaran Sathiamoorthy, Lichan Hong, Ed Chi, and Derek Zhiyuan Cheng. 2023. Do llms understand user preferences? evaluating llms on user rating prediction. arXiv preprint arXiv:2305.06474 (2023). +[52] Manojkumar Rangasamy Kannadasan and Grigor Aslanyan. 2019. Personalized query auto-completion through a lightweight representation of the user context. arXiv preprint arXiv:1905.01386 (2019). +[53] Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Greg Corrado, Laszlo Lukacs, Marina Ganea, Peter Young, and Vivek Ramavajjala. 2016. Smart Reply: Automated Response Suggestion for Email. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (San Francisco, California, USA) (KDD '16). Association for Computing Machinery, New York, NY, USA, 955-964. https://doi.org/10.1145/2939672.2939801 +[54] Mandar Kulkarni, Praveen Tangarajan, Kyung Kim, and Anusua Trivedi. 2024. Reinforcement learning for optimizing rag for domain chatbots. arXiv preprint arXiv:2401.06800 (2024). +[55] Ishita Kumar, Snigdha Viswanathan, Sushrita Yerra, Alireza Salemi, Ryan A Rossi, Franck Dernoncourt, Hanieh Deilamsalehy, Xiang Chen, Ruiyi Zhang, Shubham Agarwal, et al. 2024. Longlamp: A benchmark for personalized long-form text generation. arXiv preprint arXiv:2407.11016 (2024). +[56] Brenden Lake and Marco Baroni. 2018. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In International conference on machine learning. PMLR, 2873-2882. +[57] Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, and Michael Bendersky. 2024. Learning to rewrite prompts for personalized text generation. In Proceedings of the ACM Web Conference 2024. 3367-3378. 
+[58] Changhao Li, Yuchen Zhuang, Rushi Qiang, Haotian Sun, Hanjun Dai, Chao Zhang, and Bo Dai. 2024. Matryoshka: Learning to Drive Black-Box LLMs with LLMs. arXiv preprint arXiv:2410.20749 (2024). +[59] Lei Li, Yongfeng Zhang, and Li Chen. 2023. Personalized prompt learning for explainable recommendation. ACM Transactions on Information Systems 41, 4 (2023), 1-26. +[60] Sen Li, Fuyu Lv, Taiwei Jin, Guiyang Li, Yukun Zheng, Tao Zhuang, Qingwen Liu, Xiaoyi Zeng, James Kwok, and Qianli Ma. 2022. Query rewriting in taobao search. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3262-3271. + +[61] Xiaopeng Li, Lixin Su, Pengyue Jia, Xiangyu Zhao, Suqi Cheng, Junfeng Wang, and Dawei Yin. 2023. Agent4ranking: Semantic robust ranking via personalized query rewriting using multi-agent llm. arXiv preprint arXiv:2312.15450 (2023). +[62] Xinyu Li, Ruiyang Zhou, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024). +[63] Yuanchun Li, Hao Wen, Weijun Wang, Xiangyu Li, Yizhen Yuan, Guohong Liu, Jiacheng Liu, Wenxing Xu, Xiang Wang, Yi Sun, et al. 2024. Personal llm agents: Insights and survey about the capability, efficiency and security. arXiv preprint arXiv:2401.05459 (2024). +[64] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. 2023. Towards general text embeddings with multi-stage contrastive learning. arXiv preprint arXiv:2308.03281 (2023). +[65] Ruixue Lian, Sixing Lu, Clint Solomon, Gustavo Aguilar, Pragaash Ponnusamy, Jialong Han, Chengyuan Ma, and Chenlei Guo. 2023. PersonalTM: Transformer memory for personalized retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2256-2260. +[66] Shan-Mu Lin and Chuen-Min Huang. 2006. Personalized optimal search in local query expansion. In Proceedings of the 18th Conference on Computational Linguistics and Speech Processing. 221-236. +[67] Junling Liu, Chao Liu, Peilin Zhou, Renjie Lv, Kang Zhou, and Yan Zhang. 2023. Is chatgpt a good recommender? a preliminary study. arXiv preprint arXiv:2304.10149 (2023). +[68] Jiahong Liu, Zexuan Qiu, Zhongyang Li, Quanyu Dai, Jieming Zhu, Minda Hu, Menglin Yang, and Irwin King. 2025. A Survey of Personalized Large Language Models: Progress and Future Directions. arXiv preprint arXiv:2502.11528 (2025). +[69] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. 2024. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics 12 (2024), 157-173. +[70] Qijiong Liu, Nuo Chen, Tetsuya Sakai, and Xiao-Ming Wu. 2024. Once: Boosting content-based recommendation with both open- and closed-source large language models. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 452-461. +[71] Shuai Liu, Hyundong J Cho, Marjorie Freedman, Xuezhe Ma, and Jonathan May. 2023. RECAP: retrieval-enhanced context-aware prefix encoder for personalized dialogue response generation. arXiv preprint arXiv:2306.07206 (2023). +[72] Tyler Lu and Craig Boutilier. 2011. Budgeted social choice: From consensus to personalized decision making. In IJCAI, Vol. 11. 280-286. +[73] Zhengyi Ma, Zhicheng Dou, Yutao Zhu, Hanxun Zhong, and Ji-Rong Wen. 2021. One chatbot per person: Creating personalized chatbots based on implicit user profiles.
In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 555-564. +[74] Aman Madaan, Niket Tandon, Peter Clark, and Yiming Yang. 2022. Memory-assisted prompt editing to improve GPT-3 after deployment. arXiv preprint arXiv:2201.06009 (2022). +[75] Shengyu Mao, Xiaohan Wang, Mengru Wang, Yong Jiang, Pengjun Xie, Fei Huang, and Ningyu Zhang. 2024. Editing Personality for Large Language Models. In CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 241-254. +[76] Yuren Mao, Xuemei Dong, Wenyi Xu, Yunjun Gao, Bin Wei, and Ying Zhang. 2024. Fit-rag: black-box rag with factual information and token reduction. arXiv preprint arXiv:2403.14374 (2024). +[77] Puneet Mathur, Zhe Liu, Ke Li, Yingyi Ma, Gil Keren, Zeeshan Ahmed, Dinesh Manocha, and Xuedong Zhang. 2023. Personal: Language model personalization via domain-distributed span aggregated k-nearest n-gram retrieval augmentation. In Findings of the Association for Computational Linguistics: EMNLP 2023. 11314-11328. +[78] Fatemehsadat Mireshghallah, Vaishnavi Shrivastava, Milad Shokouhi, Taylor Berg-Kirkpatrick, Robert Sim, and Dimitrios Dimitriadis. 2021. Identifier: Implicit user representations for simple and effective personalized sentiment analysis. arXiv preprint arXiv:2110.00135 (2021). +[79] Philippe Mulhem, Nawal Ould Amer, and Mathias Gery. 2016. Axiomatic term-based personalized query expansion using bookmarking system. In International Conference on Database and Expert Systems Applications. Springer, 235-243. +[80] Sheshera Mysore, Zhuoran Lu, Mengting Wan, Longqi Yang, Steve Menezes, Tina Baghaee, Emmanuel Barajas Gonzalez, Jennifer Neville, and Tara Safavi. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023). +[81] Dang Nguyen, Jian Chen, Yu Wang, Gang Wu, Namyong Park, Zhengmian Hu, Hanjia Lyu, Junda Wu, Ryan Aponte, Yu Xia, et al. 2024. Gui agents: A survey. arXiv preprint arXiv:2412.13501 (2024). +[82] Duy A Nguyen, Rishi Kesav Mohan, Van Yang, Pritom Saha Akash, and Kevin Chen-Chuan Chang. 2025. RL-based Query Rewriting with Distilled LLM for online E-Commerce Systems. arXiv preprint arXiv:2501.18056 (2025). +[83] Jianmo Ni, Jiacheng Li, and Julian McAuley. 2019. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP). 188-197. +[84] Lin Ning, Luyang Liu, Jiaxing Wu, Neo Wu, Devora Berlowitz, Sushant Prakash, Bradley Green, Shawn O'Banion, and Jun Xie. 2024. User-llm: Efficient llm contextualization with user embeddings. arXiv preprint arXiv:2402.13598 (2024). +[85] Douglas Oard, William Webber, David Kirsch, and Sergey Golitsynski. 2015. Avocado research email collection. Philadelphia: Linguistic Data Consortium (2015). +[86] U.S. National Library of Medicine. [n.d.]. PubMed: A Free Resource for Biomedical Literature. https://pubmed.ncbi.nlm.nih.gov/ +[87] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22. + +[88] Greg Pass, Abdur Chowdhury, and Cayley Torgeson. 2006. A picture of search. 
In Proceedings of the 1st International Conference on Scalable Information Systems (Hong Kong) (InfoScale '06). Association for Computing Machinery, New York, NY, USA, 1-es. https://doi.org/10.1145/1146847.1146848 +[89] Vadim Igorevich Pavliukevich, Alina Khasanovna Zherdeva, Olesya Vladimirovna Makhnytkina, and Dmitriy Viktorovich Dyrmovskiy. [n. d.]. Improving RAG with LoRA finetuning for persona text generation. ([n. d.]). +[90] Dan Peng, Zhihui Fu, and Jun Wang. 2024. Pocketllm: Enabling on-device fine-tuning for personalized llms. arXiv preprint arXiv:2407.01031 (2024). +[91] Qiyao Peng, Hongtao Liu, Hongyan Xu, Qing Yang, Minglai Shao, and Wenjun Wang. 2024. Review-LLM: Harnessing Large Language Models for Personalized Review Generation. arXiv:2407.07487 [cs.CL] https://arxiv.org/abs/2407.07487 +[92] Hongjin Qian, Zhicheng Dou, Yutao Zhu, Yueyuan Ma, and Ji-Rong Wen. 2021. Learning implicit user profile for personalized retrieval-based chatbot. In proceedings of the 30th ACM international conference on Information & Knowledge Management. 1467-1477. +[93] Hongjin Qian, Xiahe Li, Hanxun Zhong, Yu Guo, Yueyuan Ma, Yutao Zhu, Zhanliang Liu, Zhicheng Dou, and Ji-Rong Wen. 2021. Pchatbot: a large-scale dataset for personalized chatbot. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 2470-2477. +[94] Xiaoru Qu, Yifan Wang, Zhao Li, and Jun Gao. 2024. Graph-enhanced prompt learning for personalized review generation. Data Science and Engineering 9, 3 (2024), 309-324. +[95] A. Rajaraman and J.D. Ullman. 2011. Mining of Massive Datasets. Cambridge University Press. https://books.google.co.uk/books?id=OefRhZyYOb0C +[96] Yiting Ran, Xintao Wang, Rui Xu, Xinfeng Yuan, Jiaqing Liang, Deqing Yang, and Yanghua Xiao. 2024. Capturing minds, not just words: Enhancing role-playing language models with personality-indicative data. arXiv preprint arXiv:2406.18921 (2024). +[97] Chandan K. Reddy, Lluis Marquez, Fran Valero, Nikhil Rao, Hugo Zaragoza, Sambaran Bandyopadhyay, Arnab Biswas, Anlu Xing, and Karthik Subbian. 2022. Shopping Queries Dataset: A Large-Scale ESCI Benchmark for Improving Product Search. (2022). arXiv:2206.06588 +[98] Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019). +[99] Ruiyang Ren, Peng Qiu, Yingqi Qu, Jing Liu, Wayne Xin Zhao, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2024. Bases: Large-scale web search user simulation with large language model based agents. arXiv preprint arXiv:2402.17505 (2024). +[100] Matthew Renze and Erhan Guven. 2024. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682 (2024). +[101] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023). +[102] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends in Information Retrieval 3, 4 (2009), 333-389. +[103] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762. 
+[104] Alireza Salemi, Cheng Li, Mingyang Zhang, Qiaozhu Mei, Weize Kong, Tao Chen, Zhuowan Li, Michael Bendersky, and Hamed Zamani. 2025. Reasoning-Enhanced Self-Training for Long-Form Personalized Text Generation. arXiv preprint arXiv:2501.04167 (2025). +[105] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2024. LaMP: When Large Language Models Meet Personalization. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 7370-7392. +[106] Alireza Salemi and Hamed Zamani. 2024. Learning to Rank for Multiple Retrieval-Augmented Models through Iterative Utility Maximization. arXiv preprint arXiv:2410.09942 (2024). +[107] Shibani Santurkar, Esin Durmus, Faisal Ladhak, Cinoo Lee, Percy Liang, and Tatsunori Hashimoto. 2023. Whose opinions do language models reflect?. In International Conference on Machine Learning. PMLR, 29971-30004. +[108] Rossano Schifanella, Alain Barrat, Ciro Cattuto, Benjamin Markines, and Filippo Menczer. 2010. Folks in folksonomies: social link prediction from shared metadata. In Proceedings of the third ACM international conference on Web search and data mining. 271-280. +[109] Noor Shaker, Georgios Yannakakis, and Julian Togelius. 2010. Towards automatic personalized content generation for platform games. In Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment, Vol. 6. 63-68. +[110] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-llm: A trainable agent for role-playing. arXiv preprint arXiv:2310.10158 (2023). +[111] Jocelyn Shen, Joel Mire, Hae Won Park, Cynthia Breazeal, and Maarten Sap. 2024. HEART-felt Narratives: Tracing Empathy and Narrative Style in Personal Stories with LLMs. arXiv preprint arXiv:2405.17633 (2024). +[112] Yunxiao Shi, Xing Zi, Zijing Shi, Haimin Zhang, Qiang Wu, and Min Xu. 2024. Eragent: Enhancing retrieval-augmented language models with improved accuracy, efficiency, and personalization. arXiv preprint arXiv:2405.06683 (2024). +[113] Aditi Singh, Abul Ehtesham, Saket Kumar, and Tala Talaei Khoei. 2025. Agentic Retrieval-Augmented Generation: A Survey on Agentic RAG. arXiv preprint arXiv:2501.09136 (2025). +[114] Harmanpreet Singh, Nikhil Verma, Yixiao Wang, Manasa Bharadwaj, Homa Fashandi, Kevin Ferreira, and Chul Lee. 2024. Personal Large Language Model Agents: A Case Study on Tailored Travel Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 486-514. +[115] Shamane Siriwardhana, Rivindu Weerasekera, Elliott Wen, Tharindu Kaluarachchi, Rajib Rana, and Suranga Nanayakkara. 2023. Improving the domain adaptation of retrieval augmented generation (RAG) models for open domain question answering. Transactions of the Association for Computational Linguistics 11 (2023), 1-17. +[116] Mingyang Song and Mao Zheng. 2024. A Survey of Query Optimization in Large Language Models. arXiv preprint arXiv:2412.17558 (2024). + +[117] Spotify. 2023. Annoy: Approximate Nearest Neighbors in C++/Python. https://github.com/spotify/annoy +[118] Stuck_In_the_Matrix. 2015. Reddit Public Comments (2007-10 through 2015-05). (2015). https://www.reddit.com/r/datasets/comments/3bxlg7/i_have_every_publicly_available_reddit_comment/ +[119] Lei Sun, Jinming Zhao, and Qin Jin. 2024. Revealing Personality Traits: A New Benchmark Dataset for Explainable Personality Recognition on Dialogues. arXiv preprint arXiv:2409.19723 (2024).
+[120] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized pieces: Efficient personalized large language models through collaborative efforts. arXiv preprint arXiv:2406.10471 (2024). +[121] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing large language models via personalized parameter-efficient fine-tuning. arXiv preprint arXiv:2402.04401 (2024). +[122] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2025. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401 +[123] Quan Tu, Shilong Fan, Zihang Tian, and Rui Yan. 2024. Charactereval: A Chinese benchmark for role-playing conversational agent evaluation. arXiv preprint arXiv:2401.01275 (2024). +[124] Cornell University. [n.d.]. arXiv: An Open Access Repository for Research. https://arxiv.org/ +[125] Hemanth Vemuri, Sheshansh Agrawal, Shivam Mittal, Deepak Saini, Akshay Soni, Abhinav V Sambasivan, Wenhao Lu, Yajun Wang, Mehul Parsana, Purushottam Kar, et al. 2023. Personalized retrieval over millions of items. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1014-1022. +[126] Bryan Wang, Gang Li, and Yang Li. 2023. Enabling conversational interaction with mobile ui using large language models. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-17. +[127] Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291 (2023). +[128] Hongru Wang, Wenyu Huang, Yang Deng, Rui Wang, Zezhong Wang, Yufei Wang, Fei Mi, Jeff Z Pan, and Kam-Fai Wong. 2024. Unims-rag: A unified multi-source retrieval-augmented generation for personalized dialogue systems. arXiv preprint arXiv:2401.13256 (2024). +[129] Hongru Wang, Rui Wang, Fei Mi, Yang Deng, Zezhong Wang, Bin Liang, Ruifeng Xu, and Kam-Fai Wong. 2023. Cue-CoT: Chain-of-thought prompting for responding to in-depth dialogue questions with LLMs. arXiv preprint arXiv:2305.11792 (2023). +[130] Jian Wang, Yi Cheng, Dongding Lin, Chak Tou Leong, and Wenjie Li. 2023. Target-oriented proactive dialogue systems with personalization: Problem formulation and dataset curation. arXiv preprint arXiv:2310.07397 (2023). +[131] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. 2024. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 6 (2024), 186345. +[132] Liang Wang, Nan Yang, and Furu Wei. 2023. Query2doc: Query expansion with large language models. arXiv preprint arXiv:2303.07678 (2023). +[133] Lei Wang, Jingsen Zhang, Hao Yang, Zhiyuan Chen, Jiakai Tang, Zeyu Zhang, Xu Chen, Yankai Lin, Ruihua Song, Wayne Xin Zhao, et al. 2023. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552 (2023). +[134] Xintao Wang, Yunze Xiao, Jen-tse Huang, Siyu Yuan, Rui Xu, Haoran Guo, Quan Tu, Yaying Fei, Ziang Leng, Wei Wang, et al. 2023. Incharacter: Evaluating personality fidelity in role-playing agents through psychological interviews. arXiv preprint arXiv:2310.17976 (2023). +[135] Yixiao Wang, Homa Fashandi, and Kevin Ferreira. 2024. Investigating the Personality Consistency in Quantized Role-Playing Dialogue Agents. 
In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 239–255. +[136] Yu Wang, Yifan Gao, Xiusi Chen, Haoming Jiang, Shiyang Li, Jingfeng Yang, Qingyu Yin, Zheng Li, Xian Li, Bing Yin, et al. [n.d.]. MEMORYLLM: Towards Self-Updatable Large Language Models. In Forty-first International Conference on Machine Learning. +[137] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 4891-4906. +[138] Zijie J Wang and Duen Horng Chau. 2024. MeMemo: On-device Retrieval Augmentation for Private and Personalized Text Generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2765-2770. +[139] Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. 2023. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746 (2023). +[140] Tianxin Wei, Bowen Jin, Ruirui Li, Hansi Zeng, Zhengyang Wang, Jianhui Sun, Qingyu Yin, Hanqing Lu, Suhang Wang, Jingrui He, et al. 2024. Towards unified multi-modal personalization: Large vision-language models for generative recommendation and beyond. arXiv preprint arXiv:2403.10667 (2024). +[141] Robert Wetzker, Carsten Zimmermann, and Christian Bauckhage. 2008. Analyzing social bookmarking systems: A del.icio.us cookbook. In Proceedings of the ECAI 2008 Mining Social Data Workshop. 26-30. +[142] Stanisław Woźniak, Bartłomiej Koptyra, Arkadiusz Janz, Przemysław Kazienko, and Jan Kocoń. 2024. Personalized large language models. arXiv preprint arXiv:2402.09269 (2024). +[143] Junde Wu, Jiayuan Zhu, Yunli Qi, Jingkun Chen, Min Xu, Filippo Menolascina, and Vicente Grau. 2024. Medical graph rag: Towards safe medical large language model via graph retrieval-augmented generation. arXiv preprint arXiv:2408.04187 (2024). +[144] Xuan Wu, Dong Zhou, Yu Xu, and Seamus Lawless. 2017. Personalized query expansion utilizing multi-relational social data. In 2017 12th International Workshop on Semantic and Social Media Adaptation and Personalization (SMAP). IEEE, 65-70. + +[145] Yunjia Xi, Weiwen Liu, Jianghao Lin, Xiaoling Cai, Hong Zhu, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, and Yong Yu. 2024. Towards open-world recommendation with knowledge augmentation from large language models. In Proceedings of the 18th ACM Conference on Recommender Systems. 12-22. +[146] Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, et al. 2025. The rise and potential of large language model based agents: A survey. Science China Information Sciences 68, 2 (2025), 121101. +[147] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th international ACM SIGIR conference on research and development in information retrieval. 641-649. +[148] Huatao Xu, Liying Han, Qirui Yang, Mo Li, and Mani Srivastava. 2024. Penetrative ai: Making llms comprehend the physical world. In Proceedings of the 25th International Workshop on Mobile Computing Systems and Applications. 1-7. +[149] Hongyan Xu, Hongtao Liu, Pengfei Jiao, and Wenjun Wang. 2021.
Transformer reasoning network for personalized review summarization. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1452-1461. +[150] Xinchao Xu, Zhibin Gou, Wenquan Wu, Zheng-Yu Niu, Hua Wu, Haifeng Wang, and Shihang Wang. 2022. Long time no see! open-domain conversation with long-term persona memory. arXiv preprint arXiv:2203.05797 (2022). +[151] Yiyan Xu, Jinghao Zhang, Alireza Salemi, Xinting Hu, Wenjie Wang, Fuli Feng, Hamed Zamani, Xiangnan He, and Tat-Seng Chua. 2025. Personalized Generation In Large Model Era: A Survey. arXiv preprint arXiv:2503.02614 (2025). +[152] Hao Yu, Xin Yang, Xin Gao, Yan Kang, Hao Wang, Junbo Zhang, and Tianrui Li. 2024. Personalized federated continual learning via multi-granularity prompt. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4023-4034. +[153] Xiaoyan Yu, Tongxu Luo, Yifan Wei, Fangyu Lei, Yiming Huang, Hao Peng, and Liehuang Zhu. 2024. Neeko: Leveraging dynamic lora for efficient multi-character role-playing agent. arXiv preprint arXiv:2402.13717 (2024). +[154] Xinfeng Yuan, Siyu Yuan, Yuhan Cui, Tianhe Lin, Xintao Wang, Rui Xu, Jiangjie Chen, and Deqing Yang. 2024. Evaluating character understanding of large language models via character profiling from fictional works. arXiv preprint arXiv:2404.12726 (2024). +[155] Hansi Zeng, Surya Kallumadi, Zaid Alibadi, Rodrigo Nogueira, and Hamed Zamani. 2023. A personalized dense retrieval framework for unified information access. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 121-130. +[156] Saber Zerhoudi and Michael Granitzer. 2024. PersonaRAG: Enhancing Retrieval-Augmented Generation Systems with User-Centric Agents. arXiv preprint arXiv:2407.09394 (2024). +[157] Han Zhang, Songlin Wang, Kang Zhang, Zhiling Tang, Yunjiang Jiang, Yun Xiao, Weipeng Yan, and Wen-Yun Yang. 2020. Towards personalized and semantic retrieval: An end-to-end solution for e-commerce search via embedding learning. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 2407-2416. +[158] Jiarui Zhang. 2024. Guided profile generation improves personalization with llms. arXiv preprint arXiv:2409.13093 (2024). +[159] Jesse Zhang, Jiahui Zhang, Karl Pertsch, Ziyi Liu, Xiang Ren, Minsuk Chang, Shao-Hua Sun, and Joseph J Lim. [n.d.]. Bootstrap Your Own Skills: Learning to Solve New Tasks with Large Language Model Guidance. In 7th Annual Conference on Robot Learning. +[160] Kai Zhang, Yangyang Kang, Fubang Zhao, and Xiaozhong Liu. 2023. LLM-based medical assistant personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023). +[161] Kaiyan Zhang, Jianyu Wang, Ermo Hua, Biqing Qi, Ning Ding, and Bowen Zhou. 2024. Cogenesis: A framework collaborating large and small language models for secure context-aware instruction following. arXiv preprint arXiv:2403.03129 (2024). +[162] Kai Zhang, Fubang Zhao, Yangyang Kang, and Xiaozhong Liu. 2023. Memory-augmented llm personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023). +[163] Wenlin Zhang, Chuhan Wu, Xiangyang Li, Yuhao Wang, Kuicai Dong, Yichao Wang, Xinyi Dai, Xiangyu Zhao, Huifeng Guo, and Ruiming Tang. 2025. LLMTreeRec: Unleashing the Power of Large Language Models for Cold-Start Recommendations. 
In Proceedings of the 31st International Conference on Computational Linguistics. 886-896. +[164] Yanyue Zhang, Yulan He, and Deyu Zhou. 2025. Rehearse With User: Personalized Opinion Summarization via Role-Playing based on Large Language Models. arXiv preprint arXiv:2503.00449 (2025). +[165] You Zhang, Jin Wang, Liang-Chih Yu, Dan Xu, and Xuejie Zhang. 2024. Personalized LoRA for human-centered text understanding. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 19588-19596. +[166] Yabin Zhang, Wenhui Yu, Erhan Zhang, Xu Chen, Lantao Hu, Peng Jiang, and Kun Gai. 2024. Recgpt: Generative personalized prompts for sequential recommendation via chatgpt training paradigm. arXiv preprint arXiv:2404.08675 (2024). +[167] Zeyu Zhang, Xiaohe Bo, Chen Ma, Rui Li, Xu Chen, Quanyu Dai, Jieming Zhu, Zhenhua Dong, and Ji-Rong Wen. 2024. A survey on the memory mechanism of large language model based agents. arXiv preprint arXiv:2404.13501 (2024). +[168] Zhehao Zhang, Ryan A Rossi, Branislav Kveton, Yijia Shao, Diyi Yang, Hamed Zamani, Franck Dernoncourt, Joe Barrow, Tong Yu, Sungchul Kim, et al. 2024. Personalization of large language models: A survey. arXiv preprint arXiv:2411.00027 (2024). +[169] Yi Zheng, Chongyang Ma, Kanle Shi, and Haibin Huang. 2023. Agents meet okr: An object and key results driven agent system with hierarchical self-collaboration and self-evaluation. arXiv preprint arXiv:2311.16542 (2023). +[170] Hanxun Zhong, Zhicheng Dou, Yutao Zhu, Hongjin Qian, and Ji-Rong Wen. 2022. Less is more: Learning to refine dialogue history for personalized dialogue generation. arXiv preprint arXiv:2204.08128 (2022). +[171] Wanjun Zhong, Duyu Tang, Jiahai Wang, Jian Yin, and Nan Duan. 2021. UserAdapter: Few-shot user learning in sentiment analysis. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021. 1484-1488. + +[172] Dong Zhou, Séamus Lawless, and Vincent Wade. 2012. Improving search via personalized query expansion using social media. Information retrieval 15 (2012), 218-242. +[173] Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, et al. 2022. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625 (2022). +[174] Yujia Zhou, Qiannan Zhu, Jiajie Jin, and Zhicheng Dou. 2024. Cognitive personalized search integrating large language models with an efficient memory mechanism. In Proceedings of the ACM Web Conference 2024. 1464-1473. +[175] Yuchen Zhuang, Haotian Sun, Yue Yu, Rushi Qiang, Qifan Wang, Chao Zhang, and Bo Dai. 2024. Hydra: Model factorization framework for black-box llm personalization. arXiv preprint arXiv:2406.02888 (2024).
+ +Received 20 February 2007; revised 12 March 2009; accepted 5 June 2009 \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10147/images/3888cac0ece5531f078c8ec3406df643f228458552c866b9ad79273c9d66181b.jpg b/data/2025/2504_10xxx/2504.10147/images/3888cac0ece5531f078c8ec3406df643f228458552c866b9ad79273c9d66181b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c82b15a32294a0867b97456d7d2341cf69190887 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/3888cac0ece5531f078c8ec3406df643f228458552c866b9ad79273c9d66181b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18afa0a7fc35cc841edc4dcab73b26f362341d21f4a2d27bfa3dda9bb5a5f196 +size 170253 diff --git a/data/2025/2504_10xxx/2504.10147/images/431fadc2fb6b837e6448de02affe85f7a85af23cf964b4cebff641487aab910d.jpg b/data/2025/2504_10xxx/2504.10147/images/431fadc2fb6b837e6448de02affe85f7a85af23cf964b4cebff641487aab910d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..378b451c9ddd4405eb4998fc8b727b3aa397e02e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/431fadc2fb6b837e6448de02affe85f7a85af23cf964b4cebff641487aab910d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d68c8eb13f17b17691c1e4287c45b8f5cc83c642645d64bbf67e7d91190cd0c +size 39749 diff --git a/data/2025/2504_10xxx/2504.10147/images/684289db43c065be977860260a5ca5599fa5f676c9663e41befff2d9bcc9c089.jpg b/data/2025/2504_10xxx/2504.10147/images/684289db43c065be977860260a5ca5599fa5f676c9663e41befff2d9bcc9c089.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20740e2df11badd80228961fe7d3413c8c8119ca --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/684289db43c065be977860260a5ca5599fa5f676c9663e41befff2d9bcc9c089.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5f7d16dbe27428ca20f0ca94e22d2fe117f8e77aedaba58a132be5cc110817b +size 42125 diff --git a/data/2025/2504_10xxx/2504.10147/images/7122c71aefb1637d6b1b8ecf544934b92473bd4011b205e6f3793fb7593c1f2c.jpg b/data/2025/2504_10xxx/2504.10147/images/7122c71aefb1637d6b1b8ecf544934b92473bd4011b205e6f3793fb7593c1f2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c8bd88c314ea62e3294a83c36a8c6f3da10aa2f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/7122c71aefb1637d6b1b8ecf544934b92473bd4011b205e6f3793fb7593c1f2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21aec649f1d7bf3b8aea1b7bb7f31f26dfdbccc3ef784946933441e6285a6df9 +size 3687 diff --git a/data/2025/2504_10xxx/2504.10147/images/8f9e5a0db9555c1a4ed96186bb98667b2c4fd2abae37751b9dee8a95b60f65c1.jpg b/data/2025/2504_10xxx/2504.10147/images/8f9e5a0db9555c1a4ed96186bb98667b2c4fd2abae37751b9dee8a95b60f65c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eab7b3395f868fef3399cc2619b10c5510488ae1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/8f9e5a0db9555c1a4ed96186bb98667b2c4fd2abae37751b9dee8a95b60f65c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:608f8605225d85a016df84ca5ab528b1fc2b810d6e253c0be178dfc2db74a2ba +size 80585 diff --git a/data/2025/2504_10xxx/2504.10147/images/91c490fa5f8c0af657eecb79cedc896d6a77e119d6fb8493fe9338606a50ca55.jpg b/data/2025/2504_10xxx/2504.10147/images/91c490fa5f8c0af657eecb79cedc896d6a77e119d6fb8493fe9338606a50ca55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9d2fc7b1df39e38a1a0175541e8a06b36631f73 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10147/images/91c490fa5f8c0af657eecb79cedc896d6a77e119d6fb8493fe9338606a50ca55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9555fe6ec8d37c2d9195083f4667eb4e1de0eb4d7e8fb959da1a52b0a0dce222 +size 3132 diff --git a/data/2025/2504_10xxx/2504.10147/images/c1f227eab52914058e52cd6428ef57f5faa6d829581489e901866e44230f1223.jpg b/data/2025/2504_10xxx/2504.10147/images/c1f227eab52914058e52cd6428ef57f5faa6d829581489e901866e44230f1223.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3243ccdeb19d540b617fb9c2d7716a841c316c58 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/c1f227eab52914058e52cd6428ef57f5faa6d829581489e901866e44230f1223.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff86b58b3122d2356ee77b8c781103684f8322145c96df115dd48a70e16db601 +size 150070 diff --git a/data/2025/2504_10xxx/2504.10147/images/c5b36e8fc5d85eef240bb82935b05407c1e84bf402d0060b0bb413dc04743f40.jpg b/data/2025/2504_10xxx/2504.10147/images/c5b36e8fc5d85eef240bb82935b05407c1e84bf402d0060b0bb413dc04743f40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0917dc122a54def6d80749c9dfda4893a8cdf27 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/c5b36e8fc5d85eef240bb82935b05407c1e84bf402d0060b0bb413dc04743f40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ba59c5e64b32dd14b5c5b1dd8fd173693729868575000d7dc3c4f119916b527 +size 5289 diff --git a/data/2025/2504_10xxx/2504.10147/images/d86abcf5e5ec9f4d1a1e2cf178e6e2a2411500ea7111c2031609d3b0586d7569.jpg b/data/2025/2504_10xxx/2504.10147/images/d86abcf5e5ec9f4d1a1e2cf178e6e2a2411500ea7111c2031609d3b0586d7569.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f839dc773918e5e6d8e0f65338642f0d2d177192 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/d86abcf5e5ec9f4d1a1e2cf178e6e2a2411500ea7111c2031609d3b0586d7569.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278441d29b632ade3748c4e2e61d1b08a3763781384b3964ffe2066b9d6e56ab +size 164671 diff --git a/data/2025/2504_10xxx/2504.10147/images/eddf6ee96c6208637db90f8b20f4af142d11c804484f15c3dc8e58752b900628.jpg b/data/2025/2504_10xxx/2504.10147/images/eddf6ee96c6208637db90f8b20f4af142d11c804484f15c3dc8e58752b900628.jpg new file mode 100644 index 0000000000000000000000000000000000000000..212bf36fd92f654edeb42cd025df4a71e19ec8e3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/eddf6ee96c6208637db90f8b20f4af142d11c804484f15c3dc8e58752b900628.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cb6f660e71e65b9bdca3ae6d5442539861ed7ea29196115966fdc5f695f5b37 +size 95542 diff --git a/data/2025/2504_10xxx/2504.10147/images/faaeeb7d36800aed003df61ec8252e783b23b8a2f81ce0769f1b50247566d8e0.jpg b/data/2025/2504_10xxx/2504.10147/images/faaeeb7d36800aed003df61ec8252e783b23b8a2f81ce0769f1b50247566d8e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f49e968a47848a9905a3507213391e2fe472fc00 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/images/faaeeb7d36800aed003df61ec8252e783b23b8a2f81ce0769f1b50247566d8e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02a0595cda3127c04f4f6ef4cca06d0e48da79f0ea55a7c405cd5450ca64c326 +size 2579 diff --git a/data/2025/2504_10xxx/2504.10147/layout.json b/data/2025/2504_10xxx/2504.10147/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..6aa343ae977e838d73ddae9fc0f0b0ba1e624243 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10147/layout.json @@ -0,0 +1,14337 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 70, + 92, + 337, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 92, + 337, + 107 + ], + "spans": [ + { + "bbox": [ + 70, + 92, + 337, + 107 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 118, + 302, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 118, + 302, + 132 + ], + "spans": [ + { + "bbox": [ + 69, + 118, + 302, + 132 + ], + "type": "text", + "content": "XIAOPENG LI*, City University of Hong Kong, Hong Kong" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 134, + 304, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 134, + 304, + 147 + ], + "spans": [ + { + "bbox": [ + 71, + 134, + 304, + 147 + ], + "type": "text", + "content": "PENGYUE JIA*, City University of Hong Kong, Hong Kong" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 71, + 150, + 503, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 150, + 503, + 163 + ], + "spans": [ + { + "bbox": [ + 71, + 150, + 503, + 163 + ], + "type": "text", + "content": "DERONG XU, City University of Hong Kong, Hong Kong and University of Science and Technology of China, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 71, + 167, + 272, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 167, + 272, + 179 + ], + "spans": [ + { + "bbox": [ + 71, + 167, + 272, + 179 + ], + "type": "text", + "content": "YI WEN, City University of Hong Kong, Hong Kong" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 183, + 473, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 183, + 473, + 196 + ], + "spans": [ + { + "bbox": [ + 71, + 183, + 473, + 196 + ], + "type": "text", + "content": "YINGYI ZHANG, City University of Hong Kong, Hong Kong and Dalian University of Technology, China" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 71, + 199, + 315, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 199, + 315, + 213 + ], + "spans": [ + { + "bbox": [ + 71, + 199, + 315, + 213 + ], + "type": "text", + "content": "WENLIN ZHANG, City University of Hong Kong, Hong Kong" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 216, + 307, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 216, + 307, + 228 + ], + "spans": [ + { + "bbox": [ + 71, + 216, + 307, + 228 + ], + "type": "text", + "content": "WANYU WANG, City University of Hong Kong, Hong Kong" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 232, + 268, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 232, + 268, + 243 + ], + "spans": [ + { + "bbox": [ + 71, + 232, + 268, + 243 + ], + "type": "text", + "content": "YICHAO WANG, Noah's Ark Lab, Huawei, China" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 248, + 279, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 248, + 279, + 260 + ], + "spans": [ + { + "bbox": [ + 71, + 248, + 279, + 260 + ], + "type": "text", + "content": "ZHAOCHENG DU, Noah's Ark Lab, Huawei, China" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 71, + 264, + 266, + 276 + ], + "type": "text", + "angle": 
0, + "lines": [ + { + "bbox": [ + 71, + 264, + 266, + 276 + ], + "spans": [ + { + "bbox": [ + 71, + 264, + 266, + 276 + ], + "type": "text", + "content": "XIANGYANG LI, Noah's Ark Lab, Huawei, China" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 71, + 281, + 258, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 281, + 258, + 293 + ], + "spans": [ + { + "bbox": [ + 71, + 281, + 258, + 293 + ], + "type": "text", + "content": "YONG LIU, Noah's Ark Lab, Huawei, Singapore" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 71, + 297, + 266, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 297, + 266, + 308 + ], + "spans": [ + { + "bbox": [ + 71, + 297, + 266, + 308 + ], + "type": "text", + "content": "HUIFENG GUO, Noah's Ark Lab, Huawei, China" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 71, + 312, + 276, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 312, + 276, + 324 + ], + "spans": [ + { + "bbox": [ + 71, + 312, + 276, + 324 + ], + "type": "text", + "content": "RUIMING TANG†, Noah's Ark Lab, Huawei, China" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 329, + 317, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 329, + 317, + 342 + ], + "spans": [ + { + "bbox": [ + 70, + 329, + 317, + 342 + ], + "type": "text", + "content": "XIANGYU ZHAO†, City University of Hong Kong, Hong Kong" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 350, + 503, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 350, + 503, + 473 + ], + "spans": [ + { + "bbox": [ + 69, + 350, + 503, + 473 + ], + "type": "text", + "content": "Personalization has become an essential capability in modern AI systems, enabling customized interactions that align with individual user preferences, contexts, and goals. Recent research has increasingly concentrated on Retrieval-Augmented Generation (RAG) frameworks and their evolution into more advanced agent-based architectures within personalized settings to enhance user satisfaction. Building on this foundation, this survey systematically examines personalization across the three core stages of RAG: pre-retrieval, retrieval, and generation. Beyond RAG, we further extend its capabilities into the realm of Personalized LLM-based Agents, which enhance traditional RAG systems with agentic functionalities, including user understanding, personalized planning and execution, and dynamic generation. For both personalization in RAG and agent-based personalization, we provide formal definitions, conduct a comprehensive review of recent literature, and summarize key datasets and evaluation metrics. Additionally, we discuss fundamental challenges, limitations, and promising research directions in this evolving field. Relevant papers and resources are continuously updated at the Github Repo1." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "spans": [ + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "type": "text", + "content": "CCS Concepts: " + }, + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "type": "text", + "content": " Information systems " + }, + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 70, + 486, + 274, + 496 + ], + "type": "text", + "content": " Personalization." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 509, + 454, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 509, + 454, + 520 + ], + "spans": [ + { + "bbox": [ + 69, + 509, + 454, + 520 + ], + "type": "text", + "content": "Additional Key Words and Phrases: Large Language Model, Retrieval-Augmented Generation, Agent, Personalization" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 537, + 328, + 564 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 70, + 537, + 328, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 537, + 328, + 547 + ], + "spans": [ + { + "bbox": [ + 70, + 537, + 328, + 547 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 537, + 328, + 547 + ], + "type": "text", + "content": "https://github.com/Applied-Machine-Learning-Lab/Awesome-Personalized-RAG-Agent" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 71, + 547, + 132, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 547, + 132, + 555 + ], + "spans": [ + { + "bbox": [ + 71, + 547, + 132, + 555 + ], + "type": "text", + "content": "* Equal contribution." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 71, + 555, + 147, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 555, + 147, + 564 + ], + "spans": [ + { + "bbox": [ + 71, + 555, + 147, + 564 + ], + "type": "text", + "content": "† Corresponding authors." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 583, + 503, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 583, + 503, + 623 + ], + "spans": [ + { + "bbox": [ + 69, + 583, + 503, + 623 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 70, + 624, + 310, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 624, + 310, + 633 + ], + "spans": [ + { + "bbox": [ + 70, + 624, + 310, + 633 + ], + "type": "text", + "content": "© 2018 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 71, + 635, + 160, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 635, + 160, + 643 + ], + "spans": [ + { + "bbox": [ + 71, + 635, + 160, + 643 + ], + "type": "text", + "content": "Manuscript submitted to ACM" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 216, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 216, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 216, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.10147v1 [cs.IR] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 284, + 658, + 288, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 658, + 288, + 666 + ], + "spans": [ + { + "bbox": [ + 284, + 658, + 288, + 666 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 97, + 198, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 97, + 198, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 97, + 198, + 106 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 109, + 541, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 109, + 541, + 157 + ], + "spans": [ + { + "bbox": [ + 107, + 109, + 541, + 157 + ], + "type": "text", + "content": "Xiaopeng Li*, Pengyue Jia*, Derong Xu, Yi Wen, Yingyi Zhang, Wenlin Zhang, Wanyu Wang, Yichao Wang, Zhaocheng Du, Xiangyang Li, Yong Liu, Huifeng Guo, Ruiming Tang†, and Xiangyu Zhao†. 2018. A Survey of Personalization: From RAG to Agent. In Proceedings of Make sure to enter the correct conference title from your rights confirmation email (Conference acronym 'XX). ACM, New York, NY, USA, 25 pages. https://doi.org/XXXXXXXX.XXXXXXXXXX" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 171, + 195, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 171, + 195, + 182 + ], + "spans": [ + { + "bbox": [ + 108, + 171, + 195, + 182 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 189, + 541, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 189, + 541, + 269 + ], + "spans": [ + { + "bbox": [ + 107, + 189, + 541, + 269 + ], + "type": "text", + "content": "Large Language Models (LLMs) have revolutionized AI-driven applications by enabling natural language understanding and generation at an unprecedented scale. However, these models often suffer from issues such as outdated responses and hallucinations, which severely hinder the accuracy of information generation. Retrieval-Augmented Generation (RAG) has emerged as a promising framework that integrates retrieved information from external corpora, such as external APIs [13, 36], scientific repositories [86, 124] or domain-specific databases [4, 31], ensuring more knowledge-grounded and up-to-date outputs." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 271, + 541, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 271, + 541, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 271, + 541, + 406 + ], + "type": "text", + "content": "Its versatility has led to significant applications across various domains, including question answering [115], enterprise search [16] and healthcare [143], etc. Among these applications, one particularly notable area is in agent workflows, where RAG enhances autonomous systems by providing context-aware, dynamically retrieved, and reliable knowledge. This is because each stage of the RAG process closely mirrors key aspects of an agent's workflow, as shown in Figure 1. For instance, the query rewriting phase in RAG, which involves semantic understanding and parsing, aligns with the semantic comprehension stage in agent workflows. Likewise, RAG's retrieval phase, which focuses on extracting the most relevant documents, corresponds to the planning and execution phases of an agent, where decisions are made based on retrieved knowledge. Finally, the generation phase in RAG parallels an agent's execution stage, where actions are performed based on the given task. This structural alignment suggests that the architecture of RAG is fundamentally converging with agent workflows, solidifying its position as a key facilitator of intelligent and autonomous systems." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 408, + 541, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 408, + 541, + 543 + ], + "spans": [ + { + "bbox": [ + 107, + 408, + 541, + 543 + ], + "type": "text", + "content": "Although the structural alignment between RAG and agent workflows highlights their deepening convergence, a critical next step in enhancing these intelligent systems lies in personalization. Personalization is a key driver toward achieving more adaptive and context-aware AI, which is fundamental for the progression toward Artificial General Intelligence (AGI). It plays an essential role in applications such as personalized reasoning [39, 149], adaptive decision-making [72], user-specific content generation [109, 151], and interactive AI systems [73, 92]. However, existing research lacks a comprehensive comparative analysis of personalized RAG and agentic approaches. Current surveys primarily focus on general RAG methodologies [32, 35] or agent-related literature [63, 131, 167], without systematically exploring their implications for personalization. While recent works such as [68, 168] discuss personalization, they predominantly address personalized generation within LLMs or specific downstream tasks, overlooking how personalization can be effectively integrated into RAG and agent workflows." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 544, + 541, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 544, + 541, + 584 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 541, + 584 + ], + "type": "text", + "content": "Motivated by the above issues, this survey aims to provide a comprehensive review of the integration of personalization into RAG and agentic RAG frameworks to enhance user experiences and optimize satisfaction. 
The key contributions of this work can be summarized as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 590, + 538, + 654 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 590, + 538, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 590, + 538, + 627 + ], + "spans": [ + { + "bbox": [ + 107, + 590, + 538, + 627 + ], + "type": "text", + "content": "- We provide an extensive exploration of the existing literature on how personalization is integrated into various stages of RAG (pre-retrieval, retrieval, and generation) and agentic RAG (understanding, planning, execution, and generation)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 630, + 538, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 630, + 538, + 654 + ], + "spans": [ + { + "bbox": [ + 107, + 630, + 538, + 654 + ], + "type": "text", + "content": "- We summarize the key datasets, benchmarks, and evaluation metrics used in existing research for each subtask to facilitate future studies in the respective domains." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 321, + 658, + 326, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 658, + 326, + 665 + ], + "spans": [ + { + "bbox": [ + 321, + 658, + 326, + 665 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 170, + 98, + 416, + 357 + ], + "blocks": [ + { + "bbox": [ + 170, + 98, + 416, + 357 + ], + "lines": [ + { + "bbox": [ + 170, + 98, + 416, + 357 + ], + "spans": [ + { + "bbox": [ + 170, + 98, + 416, + 357 + ], + "type": "image", + "image_path": "eddf6ee96c6208637db90f8b20f4af142d11c804484f15c3dc8e58752b900628.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 372, + 402, + 383 + ], + "lines": [ + { + "bbox": [ + 170, + 372, + 402, + 383 + ], + "spans": [ + { + "bbox": [ + 170, + 372, + 402, + 383 + ], + "type": "text", + "content": "Fig. 1. Correlation between personalization and RAG with agent flow." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 410, + 501, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 410, + 501, + 435 + ], + "spans": [ + { + "bbox": [ + 70, + 410, + 501, + 435 + ], + "type": "text", + "content": "- We also highlight the limitations of current research and suggest future directions for personalized RAG, emphasizing potential advancements to address existing challenges." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 443, + 503, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 443, + 503, + 509 + ], + "spans": [ + { + "bbox": [ + 70, + 443, + 503, + 509 + ], + "type": "text", + "content": "The outline of this survey is as follows: we introduce what is personalization (Sec. 2) and explain how personalization is adopted into RAG pipeline (Sec. 3). Then, we present a literature review on where to integrate personalization within different stages of RAG and agentic RAG workflows (Sec. 4) and discuss the key datasets and evaluation metrics used in existing research (Sec.5). Lastly, we present a discussion on the limitations of current research and future directions (Sec. 6)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 525, + 211, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 525, + 211, + 534 + ], + "spans": [ + { + "bbox": [ + 70, + 525, + 211, + 534 + ], + "type": "text", + "content": "2 WHAT IS PERSONALIZATION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 541, + 501, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 541, + 501, + 594 + ], + "spans": [ + { + "bbox": [ + 70, + 541, + 501, + 594 + ], + "type": "text", + "content": "Personalization in current research refers to the tailoring of model predictions or generated content to align with an individual's preferences. In the context of RAG and agents, personalization involves incorporating user-specific information at various stages of the RAG pipeline or within agents. User personalization can be categorized into the following types:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 601, + 501, + 653 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 70, + 601, + 501, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 601, + 501, + 626 + ], + "spans": [ + { + "bbox": [ + 70, + 601, + 501, + 626 + ], + "type": "text", + "content": "- Explicit User Profile: Explicitly presented user information, including biographical details, attributes (e.g., age, location, gender, education), and social connections (e.g., social networks)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 629, + 501, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 629, + 501, + 653 + ], + "spans": [ + { + "bbox": [ + 70, + 629, + 501, + 653 + ], + "type": "text", + "content": "- User Historical Interactions: Behavioral data, including browsing history, clicks, and purchases, which help infer user interests and preferences to improve personalization." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 284, + 658, + 289, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 658, + 289, + 666 + ], + "spans": [ + { + "bbox": [ + 284, + 658, + 289, + 666 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 110, + 538, + 396 + ], + "blocks": [ + { + "bbox": [ + 238, + 94, + 408, + 104 + ], + "lines": [ + { + "bbox": [ + 238, + 94, + 408, + 104 + ], + "spans": [ + { + "bbox": [ + 238, + 94, + 408, + 104 + ], + "type": "text", + "content": "Table 1. Overview of Personalized RAG and Agent." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 109, + 110, + 538, + 396 + ], + "lines": [ + { + "bbox": [ + 109, + 110, + 538, + 396 + ], + "spans": [ + { + "bbox": [ + 109, + 110, + 538, + 396 + ], + "type": "table", + "html": "
<table>
<tr><td>Field</td><td>Sub-field</td><td>Subsub-field</td><td>Papers</td></tr>
<tr><td rowspan=5>Pre-retrieval</td><td rowspan=2>Query Rewriting</td><td>Learning to Personalized Query Rewrite</td><td>CLE-QR [60], CGF [38], PEARL [80]</td></tr>
<tr><td>LLM to Personalized Query Rewrite</td><td>Least-to-Most Prompting [173], ERAGent [112], CoPS [174], Agent4Ranking [61], FIG [22], BASES [99]</td></tr>
<tr><td rowspan=2>Query Expansion</td><td>Tagging-based query expansion</td><td>Gossiple [10], Biancalana and Micarelli [12], SoQuES [15], Zhou et al. [172]</td></tr>
<tr><td>Else</td><td>Lin and Huang [66], Bender et al. [9], Axiomatic PQEC [79], WE-LM [144], PSQE [14], PQEWC [7]</td></tr>
<tr><td>Others</td><td></td><td>Bobo [33], Kannadasan and Aslanyan [52], PSQE [8]</td></tr>
<tr><td rowspan=6>Retrieval</td><td>Indexing</td><td></td><td>PEARL [80], KG-Retriever [21], EMG-RAG [137], PGraphRAG [5]</td></tr>
<tr><td rowspan=4>Retrieval</td><td>Dense Retrieval</td><td>MeMemo [138], RECAP [71], LAPDOG [43], Gu et al. [37], PersonalLM [77], UIA [155], XPERT [125], DPSR [157], RTM [11], Pearl [80], MemPrompt [74], EERRA [23], MALP [160], USER-LLM [84], PER-PCS [120]</td></tr>
<tr><td>Sparse Retrieval</td><td>OPPU [121], PAG [101], Au et al. [5], UniMS-RAG [128], Deng et al. [29]</td></tr>
<tr><td>Prompt-based Retrieval</td><td>LAPS [50], UniMP [140], Shen et al. [111]</td></tr>
<tr><td>Others</td><td>Salemi et al. [103], PersonalTM [65], Zhang et al. [165]</td></tr>
<tr><td>Post-retrieval</td><td></td><td>PersonaRAG [156], Pavliukevich et al. [89], UniMS-RAG [128], Salemi and Zamani [106], Zhang et al. [164], AutoCompressors [24], FIT-RAG [76]</td></tr>
<tr><td rowspan=5>Generation</td><td rowspan=3>Generation from Explicit Preferences</td><td>Direct Prompting</td><td>P² [49], Character Profiling [154], OpinionQA [107], Kang et al. [51], Liu et al. [67], Cue-CoT [129], TICL [26]</td></tr>
<tr><td>Profile-Augmented Prompting</td><td>GPG [158], Richardson et al. [101], ONCE [70], LLMTreeRec [163], KAR [145], Matryoshka [58]</td></tr>
<tr><td>Personalized-Prompt Prompting</td><td>Li et al. [57], RecGPT [166], PEPLER-D [59], GRAPA [94], SGPT [28], PFCL [152]</td></tr>
<tr><td rowspan=2>Generation from Implicit Preferences</td><td>Fine-tuning-Based Methods</td><td>PLoRA [165], LM-P [142], MiLP [165], OPPU [122], PER-PCS [120], Review-LLM [91], UserIdentifier [78], UserAdapter [171], HYDRA [175], PocketLLM [90], CoGenesis [161]</td></tr>
<tr><td>Reinforcement Learning-Based Methods</td><td>P-RLHF [62], P-SOUPS [47], PAD [20], REST-PG [104], Salemi et al. [103], RewrimerSIRI [57], Kulkarni et al. [54]</td></tr>
<tr><td rowspan=7>From RAG to Agent</td><td rowspan=3>Personalized Understanding</td><td>In user-profile understanding</td><td>Xu et al. [148], Abbasian et al. [2]</td></tr>
<tr><td>In agent's role understanding</td><td>RoleLLM [139], Character-LLM [110], Wang et al. [134]</td></tr>
<tr><td>In agent's user-role joint understanding</td><td>SocialBench [18], Dai et al. [27], Ran et al. [96], Wang et al. [126], Tu et al. [123], Necko [153]</td></tr>
<tr><td rowspan=2>Personalized Planning and Execution</td><td>Memory Management</td><td>EMG-RAG [137], Park et al. [87], Abbasian et al. [2], RecAgent [133], TravelPlanner+ [114], PersonalWAB [17], VOYAGER [127], MemoryLLM [136]</td></tr>
<tr><td>Tool and API Calling</td><td>VOYAGER [127], Zhang et al. [159], PUMA [17], Wang et al. [126], PenetrativeAI [148], Huang et al. [44], [87], MetaGPT [40], OKR-Agent [169]</td></tr>
<tr><td rowspan=2>Personalized Generation</td><td>Alignment with User Fact</td><td>Character-LLM [110], Wang et al. [135], Dai et al. [27]</td></tr>
<tr><td>Alignment with User Preferences</td><td>Wang et al. [139], Ran et al. [96], Wang et al. [134], Chen et al. [18]</td></tr>
</table>
", + "image_path": "3888cac0ece5531f078c8ec3406df643f228458552c866b9ad79273c9d66181b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 420, + 538, + 458 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 107, + 420, + 538, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 420, + 538, + 443 + ], + "spans": [ + { + "bbox": [ + 107, + 420, + 538, + 443 + ], + "type": "text", + "content": "- User Historical Content: Implicit personalization derived from user-generated content, such as chat history, emails, reviews, and social media interactions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 447, + 533, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 447, + 533, + 458 + ], + "spans": [ + { + "bbox": [ + 107, + 447, + 533, + 458 + ], + "type": "text", + "content": "- Persona-Based User Simulation: The use of LLM-based agents to simulate and generate personalized interactions." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 465, + 538, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 465, + 538, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 465, + 538, + 491 + ], + "type": "text", + "content": "Integrating this personalized information at various stages of the RAG and agent workflows enables dynamic alignment with human preferences, thereby making responses more user-centric and adaptive." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 505, + 282, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 505, + 282, + 514 + ], + "spans": [ + { + "bbox": [ + 107, + 505, + 282, + 514 + ], + "type": "text", + "content": "3 HOW TO ADOPT PERSONALIZATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 522, + 435, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 522, + 435, + 533 + ], + "spans": [ + { + "bbox": [ + 107, + 522, + 435, + 533 + ], + "type": "text", + "content": "We define the process of introducing personalization within the RAG pipeline as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 252, + 542, + 539, + 553 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 542, + 539, + 553 + ], + "spans": [ + { + "bbox": [ + 252, + 542, + 539, + 553 + ], + "type": "interline_equation", + "content": "g = \\mathcal {G} (\\mathcal {R} (Q (q, p), C, p), \\text {p r o m p t}, p, \\theta) \\tag {1}", + "image_path": "c5b36e8fc5d85eef240bb82935b05407c1e84bf402d0060b0bb413dc04743f40.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "spans": [ + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " denotes personalized information, and the process unfolds in three steps. 
In the pre-retrieval phase, query processing " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "(Q)" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " refines the query " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " using personalized information, such as through query rewriting or expansion. During the retrieval phase, the retriever " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "(\\mathcal{R})" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " leverages " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " to fetch relevant documents from the corpus " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "(C)" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": ". Finally, in the generation phase, the retrieved information, combined with " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " and structured using the given prompt, id fed into the generator " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "(\\mathcal{G})" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " with parameter " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": " to produce the final response " + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 107, + 561, + 539, + 654 + ], + "type": "text", + "content": ". It is evident that personalized information directly influences multiple stages of the RAG pipeline. In this survey, we consider the agent system as a specialized application of the RAG framework, where personalization is incorporated in a manner similar to the RAG framework." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 321, + 658, + 326, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 658, + 326, + 665 + ], + "spans": [ + { + "bbox": [ + 321, + 658, + 326, + 665 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 99, + 500, + 304 + ], + "blocks": [ + { + "bbox": [ + 70, + 99, + 500, + 304 + ], + "lines": [ + { + "bbox": [ + 70, + 99, + 500, + 304 + ], + "spans": [ + { + "bbox": [ + 70, + 99, + 500, + 304 + ], + "type": "image", + "image_path": "8f9e5a0db9555c1a4ed96186bb98667b2c4fd2abae37751b9dee8a95b60f65c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 193, + 310, + 379, + 320 + ], + "lines": [ + { + "bbox": [ + 193, + 310, + 379, + 320 + ], + "spans": [ + { + "bbox": [ + 193, + 310, + 379, + 320 + ], + "type": "text", + "content": "Fig. 2. Overview of the personalized pre-retrieval stage." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 337, + 254, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 337, + 254, + 347 + ], + "spans": [ + { + "bbox": [ + 70, + 337, + 254, + 347 + ], + "type": "text", + "content": "4 WHERE TO ADOPT PERSONALIZATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 354, + 146, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 354, + 146, + 364 + ], + "spans": [ + { + "bbox": [ + 70, + 354, + 146, + 364 + ], + "type": "text", + "content": "4.1 Pre-retrieval" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 372, + 503, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 503, + 424 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 503, + 424 + ], + "type": "text", + "content": "4.1.1 Definition. Pre-retrieval is a crucial step in information retrieval systems, where the original user query is enhanced or modified before the retrieval process to improve the relevance and quality of the search results, as shown in Figure 2. This process often incorporates additional contextual or personalized information to better align the query with the user's intent. 
The process can be formalized as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 261, + 432, + 503, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 432, + 503, + 444 + ], + "spans": [ + { + "bbox": [ + 261, + 432, + 503, + 444 + ], + "type": "interline_equation", + "content": "q ^ {*} = Q (q, p) \\tag {2}", + "image_path": "faaeeb7d36800aed003df61ec8252e783b23b8a2f81ce0769f1b50247566d8e0.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "spans": [ + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "text", + "content": " denote the personalized information and original query, and " + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "inline_equation", + "content": "q^{*}" + }, + { + "bbox": [ + 69, + 452, + 502, + 475 + ], + "type": "text", + "content": " is the optimized query after query reformulation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 487, + 503, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 487, + 503, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 487, + 503, + 552 + ], + "type": "text", + "content": "4.1.2 Query Rewriting. Query rewriting in RAG at the pre-retrieval stage refers to the process of reformulating user queries to enhance retrieval effectiveness by improving relevance, disambiguating intent, or incorporating contextual information before retrieving documents from an external knowledge source. The literature on personalized query rewriting can be broadly classified into two primary categories: (1) Direct Personalized Query Rewriting and (2) Auxiliary Personalized Query Rewriting." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 562, + 503, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 562, + 503, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 562, + 503, + 655 + ], + "type": "text", + "content": "(1). Direct Personalized Query Rewriting. The first category focuses on personalized query rewriting by using direct models. For example, Cho et al. [25] presents a personalized search-based query rewrite system for conversational AI that addresses user-specific semantic and phonetic errors. Nguyen et al. [82] apply reinforcement learning techniques to improve query rewriting in online e-commerce systems, leveraging distilled LLMs for personalized performance. CLE-QR [60] explores query rewriting in Taobao's search engine to enhance user satisfaction through customized query adaptation. CGF [38] introduces a constrained generation framework that allows for more flexible and personalized query rewriting in conversational AI. Li et al. 
[57] investigate learning methods to rewrite prompts for personalized" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 284, + 658, + 288, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 658, + 288, + 665 + ], + "spans": [ + { + "bbox": [ + 284, + 658, + 288, + 665 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 96, + 539, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 96, + 539, + 135 + ], + "spans": [ + { + "bbox": [ + 107, + 96, + 539, + 135 + ], + "type": "text", + "content": "text generation, improving the relevance and engagement of AI-generated content. Additionally, PEARL [80] discusses personalizing large language model-based writing assistants through the integration of generation-calibrated retrievers, enhancing AI-generated content." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 147, + 541, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 147, + 541, + 296 + ], + "spans": [ + { + "bbox": [ + 107, + 147, + 541, + 296 + ], + "type": "text", + "content": "(2). Auxiliary Personalized Query Rewriting. The second category emphasizes personalized query rewriting by using auxiliary mechanisms, such as retrieval, reasoning strategies, and external memory. Zhou et al. [173] propose a least-to-most prompting strategy that aids in complex reasoning within LLMs, which can be adapted for personalized text generation. ERAGent [112] enhance retrieval-augmented LLMs to improve personalization, efficiency, and accuracy, indirectly supporting personalized query rewriting for content generation. CoPS [174] integrate LLMs with memory mechanisms to create more personalized search experiences, which also influences content generation through better query understanding. Further, Agent4Ranking [61] employs multi-agent LLMs to perform semantic robust ranking, including personalized query rewriting to improve search rankings. FIG [22] combine graph-based methods with LLMs to query rewrite, improving personalized content generation and conversational interactions. Lastly, BASES [99] employ LLM-based agents to simulate large-scale web search user interactions, contributing to the development of personalized query rewriting strategies for content generation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 308, + 541, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 308, + 541, + 389 + ], + "spans": [ + { + "bbox": [ + 107, + 308, + 541, + 389 + ], + "type": "text", + "content": "4.1.3 Query Expansion. Query expansion enhances retrieval systems by expanding a user's original query with additional terms, synonyms, or refined structure to better capture intent. 
This improves the relevance and scope of retrieved documents. Recent advancements in LLMs have reinvigorated this field [46, 48, 132], leveraging their comprehension and generation abilities to expand queries using encoded knowledge or external retrieval, with notable success. Personalized query expansion, a subset, incorporates user-specific data to tailor results, boosting performance and customizing the search experience." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 400, + 541, + 655 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 107, + 400, + 541, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 400, + 541, + 521 + ], + "spans": [ + { + "bbox": [ + 107, + 400, + 541, + 521 + ], + "type": "text", + "content": "(1). Tagging-based Query Expansion. By 2009, studies began incorporating tagging information to enhance personalized query expansion. For instance, Gossle [10] introduced the TagMap and TagRank algorithms, which dynamically selected tags from personalized networks constructed using the cosine similarity of user-item tag distances, improving recall performance. Similarly, Biancalana and Micarelli [12] recorded user queries and visited URLs, leveraging social bookmarking to extract relevant tags and build a personalized three-dimensional co-occurrence matrix. Based on this, multiple semantically categorized expanded queries were generated to better reflect user interests. Further advancements include SoQuES [15], which integrated tag semantic similarity with social proximity, and a graph-based approach [172] that utilized Tag-Topic models and pseudo-relevance feedback for term weighting, tailoring the expansion process to individual user preferences." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 533, + 539, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 533, + 539, + 655 + ], + "spans": [ + { + "bbox": [ + 107, + 533, + 539, + 655 + ], + "type": "text", + "content": "(2). Else. Apart from tagging-based techniques, early research on Personalized Query Expansion primarily focused on modeling user personalization based on search history [66], social networks, or preferences derived from friendship networks [9]. The Axiomatic PQEC framework [79] formalized expansion rules using both local (user behavior-driven) and social (network-driven) strategies. In 2017, WE-LM [144] advanced this paradigm by modeling multi-relational networks with word embeddings across tag-word relationships, refining associations through affinity graphs. Later, PSQE [14] further improved tagging-based methods using utf-iuf user profiling, integrating a tag similarity graph with user profiles in the online phase to compute expansion terms relevant to user interests in real-time, achieving dynamic personalized expansion. In addition, PQEWC [7] leveraged clustering and contextual word embeddings to optimize query expansions dynamically." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 321, + 658, + 326, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 658, + 326, + 666 + ], + "spans": [ + { + "bbox": [ + 321, + 658, + 326, + 666 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 93, + 503, + 223 + ], + "blocks": [ + { + "bbox": [ + 72, + 93, + 503, + 223 + ], + "lines": [ + { + "bbox": [ + 72, + 93, + 503, + 223 + ], + "spans": [ + { + "bbox": [ + 72, + 93, + 503, + 223 + ], + "type": "image", + "image_path": "431fadc2fb6b837e6448de02affe85f7a85af23cf964b4cebff641487aab910d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 200, + 228, + 372, + 239 + ], + "lines": [ + { + "bbox": [ + 200, + 228, + 372, + 239 + ], + "spans": [ + { + "bbox": [ + 200, + 228, + 372, + 239 + ], + "type": "text", + "content": "Fig. 3. Overview of the personalized retrieval stage." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 263, + 503, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 263, + 503, + 330 + ], + "spans": [ + { + "bbox": [ + 70, + 263, + 503, + 330 + ], + "type": "text", + "content": "4.1.4 Others. Besides query rewriting and query expansion, other personalized query-related research focuses on areas like query disambiguation and query auto-completion [116]. Bobo [33] allows users to input contextual terms reflecting their domain knowledge. In 2019, a method [52] applied fastText embeddings from recent queries to rank candidates. In addition, PSQE [8] employed synthetic user profiles from Wikipedia and word2vec embeddings for query disambiguation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 341, + 503, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 341, + 503, + 393 + ], + "spans": [ + { + "bbox": [ + 70, + 341, + 503, + 393 + ], + "type": "text", + "content": "4.1.5 Discussion. While both query rewriting and query expansion aim to align user input with system understanding to enhance retrieval quality, their roles in personalization differ in fundamental ways. Understanding the distinct operational characteristics and application scenarios of each technique is essential for designing effective personalized retrieval systems. 
The key takeaways are listed as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 401, + 501, + 453 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 70, + 401, + 501, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 401, + 501, + 426 + ], + "spans": [ + { + "bbox": [ + 70, + 401, + 501, + 426 + ], + "type": "text", + "content": "- Query rewriting is most beneficial when the original query is ambiguous, underspecified, or misaligned with retrieval intents, particularly in conversational or multi-turn settings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 428, + 501, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 428, + 501, + 453 + ], + "spans": [ + { + "bbox": [ + 70, + 428, + 501, + 453 + ], + "type": "text", + "content": "- Query expansion is most effective when the original query is relevant but incomplete - i.e., when it needs to be semantically broadened to cover additional relevant concepts." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 468, + 132, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 468, + 132, + 478 + ], + "spans": [ + { + "bbox": [ + 70, + 468, + 132, + 478 + ], + "type": "text", + "content": "4.2 Retrieval" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "spans": [ + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "content": "4.2.1 Definition. The retrieval process involves finding the most relevant documents " + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "inline_equation", + "content": "D^{*}" + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "content": " from a corpus " + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "content": " based on a query " + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "inline_equation", + "content": "q^{*}" + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "content": ", as shown in Figure 3. To incorporate personalization, additional user-specific information " + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "content": " is integrated into the retrieval function " + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 70, + 485, + 501, + 538 + ], + "type": "text", + "content": ". This allows the retrieval process to tailor the selected documents to align with individual user preferences or contexts, thereby enhancing the relevance and personalization of the generated outputs." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 253, + 545, + 503, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 545, + 503, + 559 + ], + "spans": [ + { + "bbox": [ + 253, + 545, + 503, + 559 + ], + "type": "interline_equation", + "content": "D ^ {*} = \\mathcal {R} (q ^ {*}, C, p) \\tag {3}", + "image_path": "91c490fa5f8c0af657eecb79cedc896d6a77e119d6fb8493fe9338606a50ca55.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 566, + 501, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 566, + 501, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 566, + 501, + 605 + ], + "type": "text", + "content": "In the retrieval process, personalization can primarily be introduced by focusing on three steps: indexing, retrieval, and post-retrieval. These steps ensure efficient and accurate retrieval of relevant documents or knowledge, while tailoring the process to individual user preferences. Below, we provide a detailed explanation of each step." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 616, + 503, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 616, + 503, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 616, + 503, + 654 + ], + "type": "text", + "content": "4.2.2 Indexing. Indexing organizes knowledge base data into a structured format to facilitate efficient retrieval. Within the RAG pipeline, documents are either chunked or entirely encoded into representations before being integrated into searchable systems [30, 117]. Conventional encoding methods employ either sparse encoding techniques (e.g.," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 289, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 289, + 665 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 289, + 665 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 96, + 539, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 96, + 539, + 121 + ], + "spans": [ + { + "bbox": [ + 107, + 96, + 539, + 121 + ], + "type": "text", + "content": "TF-IDF [95], BM25 [102]) or dense encoding approaches leveraging pre-trained models, such as BERT [1], Siamese Encoders [98], or LLM-based encoders [64, 147]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 124, + 541, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 124, + 541, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 124, + 541, + 217 + ], + "type": "text", + "content": "To introduce personalization at the indexing stage, PEARL [80] generates user embeddings by encoding personal history data with models like DeBERTa. 
These embeddings are subsequently clustered to create personalized shared indices. Other approaches integrate knowledge graphs into indexing to enhance retrieval performance. For example, KG-R retriever [21] employs a Hierarchical Index Graph, consisting of a knowledge graph layer and a collaborative document layer, to improve RAG retrieval. EMG-RAG [137] incorporates personalized memory within an editable knowledge graph, enabling dynamic retrieval. Similarly, PGraphRAG [5] leverages user-centric knowledge graphs to enhance personalization in retrieval tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 228, + 541, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 228, + 541, + 267 + ], + "spans": [ + { + "bbox": [ + 107, + 228, + 541, + 267 + ], + "type": "text", + "content": "4.2.3 Retrieval. The Retrieval step matches a user query with the indexed knowledge base to fetch relevant candidates. It can be broadly categorized into four different types: (1) Dense Retrieval, (2) Sparse Retrieval, (3) Prompt-based Retrieval, and (4) Others." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 278, + 541, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 278, + 541, + 426 + ], + "spans": [ + { + "bbox": [ + 107, + 278, + 541, + 426 + ], + "type": "text", + "content": "(1). Dense Retrieval. Dense retrieval methods often use vector embeddings and similarity metrics (e.g., cosine similarity) and achieve personalization by encoding user preferences, context, or interactions into query or document embeddings, enabling tailored results through similarity-based matching. For instance, MeMemo [138] retrieves personalized information by matching user-specific embeddings with document vectors, focusing on private, on-device text generation. Similarly, RECAP [71] and LAPDOG [43] enhance personalized dialogue generation by encoding queries and user profiles as dense vectors and retrieving top-N results, ensuring user-specific context drives the responses. In chatbots, Gu et al. [37] integrates conversational context and user profiles to align retrieved responses with user personas. PersonalM [77] employs group-wise contrastive learning, training its retrieval model to align user queries with domain-specific text fragments, thereby improving personalization. UIA [155] employs dual encoders to retrieve documents tailored to user preferences. XPERT [125] incorporates temporal events and user interactions into embeddings, enabling large-scale retrieval across millions of items." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 429, + 542, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 429, + 542, + 522 + ], + "spans": [ + { + "bbox": [ + 107, + 429, + 542, + 522 + ], + "type": "text", + "content": "Dense retrieval also enhances specific applications like e-commerce, medical assistance, and language models. DPSR [157] and RTM [11] encode user queries and product information to personalize product searches dynamically. Pearl [80] and MemPrompt [74] retrieve personalized content by leveraging historical user data and memory-assisted mechanisms. EERRA [23] uses review embeddings as dense queries for recommendations. In medical assistance, MALP [160] and User-LLM [84] integrate short- and long-term user interactions into embeddings for contextualized, personalized responses. 
Finally, PER-PCS [120] retrieves relevant information using individual user histories, enhancing the personalization capabilities of large language models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 533, + 542, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 533, + 542, + 655 + ], + "spans": [ + { + "bbox": [ + 107, + 533, + 542, + 655 + ], + "type": "text", + "content": "(2). Sparse Retrieval. Sparse retrieval methods often rely on term-based matching (e.g., BM25) and apply personalization by assigning higher weights to terms or keywords that are more relevant to the user. OPPU [121] uses the BM25 algorithm to select the k most relevant records from the user's historical data for the current query. Similarly, PAG [101] incorporates user input and profiles to enhance summarization and retrieval, aligning sparse representations with personalization objectives for large language models. Au et al. [5] uses BM25 search algorithms to find entries related to the target user or neighboring users through the graph structure. UniMS-RAG [128] combines sparse and dense retrieval by leveraging multi-source knowledge, such as dialogue context and user images, to refine personalized responses in dialogue systems. Lastly, Deng et al. [29] apply sparse retrieval to support fact-based queries, considering user queries and preferences to enhance answer generation for e-commerce applications." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 321, + 658, + 327, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 658, + 327, + 666 + ], + "spans": [ + { + "bbox": [ + 321, + 658, + 327, + 666 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 96, + 504, + 362 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 70, + 96, + 503, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 96, + 503, + 244 + ], + "spans": [ + { + "bbox": [ + 70, + 96, + 503, + 244 + ], + "type": "text", + "content": "(3). Prompt-based Retrieval. Prompt-based retrieval leverages prompts to guide retrieval from the model or external sources and introduces personalization by crafting user-specific prompts that guide the retrieval process. These prompts may include explicit user preferences, historical interactions, or detailed instructions that reflect the user's unique requirements. By embedding this personalized context directly into the prompt, the retrieval process can dynamically adjust to capture and return results that are most relevant to the user. 
LAPS [50] focuses on multi-session conversational search by storing user preferences and dialogue context, then using prompts to retrieve relevant information tailored to the user's biases and categories of interest. UniMP [140] employs user interaction histories as input to prompt-based retrieval, enabling personalized recommendations for multi-modal tasks, such as vision-language applications, by aligning prompts with user behavioral data. In contrast, Shen et al. [111] explores the use of LLMs to extract empathy and narrative styles from user-provided stories, but this work primarily focuses on style extraction and does not explicitly involve a retrieval component." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 255, + 504, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 255, + 504, + 362 + ], + "spans": [ + { + "bbox": [ + 70, + 255, + 504, + 362 + ], + "type": "text", + "content": "(4). Others. Reinforcement learning-based retrieval personalizes the process by optimizing retrieval policies based on user feedback, learning user preferences over time to adjust strategies. Salemi et al. [103] combines models like BM25, RbR, and dense retrieval, refining them with reinforcement learning (RL) and knowledge distillation (KD) to adapt to user profiles for personalized outputs. Parameter-based retrieval leverages pre-trained model parameters to implicitly store and retrieve user-specific information, allowing direct retrieval from the model without traditional indices. PersonalTM [65] generates document identifiers (Document IDs) using a Transformer model, encoding query, history, and document relationships into its parameters for personalization. Similarly, Zhang et al. [165] uses parameterized representations to integrate user queries and histories, tailoring responses to individual preferences." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 373, + 502, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 502, + 411 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 502, + 411 + ], + "type": "text", + "content": "4.2.4 Post-retrieval. Current Post-Retrieval methods primarily focus on refining retrieved documents or responses to improve relevance and coherence, current methodologies could be categorized into three parts (1) Re-ranking, (2) Summarization, and (3) Compression." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 422, + 504, + 654 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 70, + 422, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 504, + 529 + ], + "type": "text", + "content": "(1). Re-ranking. Re-ranking enhances personalized content generation by prioritizing more relevant documents at the top. PersonaRAG [156] extends RAG by integrating user-centric agents, such as the Live Session Agent and the Document Ranking Agent, to refine document ranking and improve overall performance. Pavliukevich et al. [89] propose a cross-encoder BERT model for re-ranking external knowledge within a personalized context. UniMS-RAG [128] introduces a scoring mechanism that evaluates retrieved documents and outputs by optimizing the retriever. Besides, it includes an evidence attention mask, enabling re-ranking during inference and applying it to personalized datasets. 
Salemi and Zamani [106] present an iterative approach to optimizing ranking results based on the expectation-maximization algorithm, with performance validated in personalized scenarios." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 540, + 502, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 540, + 502, + 578 + ], + "spans": [ + { + "bbox": [ + 70, + 540, + 502, + 578 + ], + "type": "text", + "content": "(2). Summarization. Summarization refers to the process of summarizing retrieved information to enhance performance. For instance, Zhang et al. [164] introduced a role-playing agent system to summarize retrieved history in order to improve the final Personalized Opinion Summarization process." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 589, + 503, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 589, + 503, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 589, + 503, + 654 + ], + "type": "text", + "content": "(3). Compression. Compression involves condensing embeddings or retrieved content to enhance efficiency and effectiveness. Approaches like AutoCompressor [24] compress contextual embeddings into shorter semantic representations, and FIT-RAG [76] introduces a self-knowledge recognizer along with a sub-document-level token reduction mechanism to minimize tokens within RAG pipeline. However, few studies have specifically explored personalized fields, highlighting a promising direction for future research." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 284, + 658, + 289, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 658, + 289, + 665 + ], + "spans": [ + { + "bbox": [ + 284, + 658, + 289, + 665 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 96, + 541, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 96, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 107, + 96, + 541, + 342 + ], + "type": "text", + "content": "4.2.5 Discussion. Indexing, retrieval, and post-retrieval methods each play a critical role in ensuring efficient and personalized information processing, with specific applications and trade-offs. Indexing focuses on organizing knowledge bases for efficient retrieval, using techniques such as sparse encoding methods like TF-IDF and BM25, which are efficient but limited in understanding semantics, and dense encoding methods like BERT and DeBERTa, which provide better semantic understanding but require significant computational resources. These methods are widely used in tasks like question answering and personalized recommendation systems. 
Retrieval involves matching user queries with relevant documents and can be categorized into dense retrieval, which provides high semantic understanding and personalization but is computationally expensive; sparse retrieval, which is efficient and interpretable but less capable of handling semantics; prompt-based retrieval, which is highly flexible and adaptable to user needs but requires careful engineering of prompts; and advanced methods like reinforcement learning-based approaches, which dynamically adapt to user feedback but are complex to implement. This step is essential in applications like personalized dialogue systems, search engines, and e-commerce. Post-retrieval methods refine retrieved results to enhance relevance and coherence through re-ranking, which improves personalization and prioritizes relevant content but increases computational overhead; summarization, which simplifies complex information for better user understanding but risks losing critical details; and compression, which reduces computational costs by condensing information but remains underexplored in personalized contexts. Together, these methods provide a comprehensive pipeline for delivering efficient, relevant, and personalized outputs, balancing their strengths in semantic understanding, relevance, and flexibility with challenges related to computational costs and implementation complexity." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 354, + 177, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 354, + 177, + 363 + ], + "spans": [ + { + "bbox": [ + 107, + 354, + 177, + 363 + ], + "type": "text", + "content": "4.3 Generation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "spans": [ + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "content": "4.3.1 Definition. Personalized generation incorporates user-specific retrieved documents " + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "inline_equation", + "content": "D^{*}" + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "content": ", task-specific prompt prompt, and user preference information " + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "content": " via the generator " + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "content": " to produce tailored content " + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "inline_equation", + "content": "g^{*}" + }, + { + "bbox": [ + 107, + 370, + 541, + 410 + ], + "type": "text", + "content": " aligned with individual preference, where the flow is shown in Figure 4. 
The generation process can be formulated as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 274, + 417, + 539, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 417, + 539, + 430 + ], + "spans": [ + { + "bbox": [ + 274, + 417, + 539, + 430 + ], + "type": "interline_equation", + "content": "g ^ {*} = \\mathcal {G} (D ^ {*}, \\text {p r o m p t}, p, \\theta). \\tag {4}", + "image_path": "7122c71aefb1637d6b1b8ecf544934b92473bd4011b205e6f3793fb7593c1f2c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "spans": [ + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "text", + "content": "Personalized generation can be achieved by incorporating explicit and implicit preferences. Explicit preference-driven methodologies utilize direct input signals (e.g., " + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "inline_equation", + "content": "D^{*}" + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "text", + "content": ", prompt, and " + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "text", + "content": "), to tailor outputs to specific user preferences. Conversely, implicit preference-encoded approaches embed personalized information within the parameters " + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 107, + 437, + 541, + 503 + ], + "type": "text", + "content": " of the generator model, during training, thereby facilitating preference alignment without the necessity for explicit runtime inputs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 513, + 541, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 513, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 107, + 513, + 541, + 594 + ], + "type": "text", + "content": "4.3.2 Generation from Explicit Preferences. Integrating explicit preferences into LLMs facilitates personalized content generation. Explicit preference information encompasses user demographic information (e.g., age, occupation, gender, location), user behavior sequences (reflecting historical behavioral patterns), and user historical output texts (capturing writing style and tone preferences). The injection of explicit preferences for personalized generation can be categorized into three types: (1) Direct-integrated Prompting, (2) Summary-augmented Prompting, and (3) Adaptive Prompting." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 603, + 541, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 603, + 541, + 656 + ], + "spans": [ + { + "bbox": [ + 107, + 603, + 541, + 656 + ], + "type": "text", + "content": "(1). Direct-integrated Prompting. Integrating user explicit preferences into language models through prompting enables the prediction of users' intent and behavioral patterns, facilitating personalized content generation. 
For instance, " + }, + { + "bbox": [ + 107, + 603, + 541, + 656 + ], + "type": "inline_equation", + "content": "\\mathrm{P}^2" + }, + { + "bbox": [ + 107, + 603, + 541, + 656 + ], + "type": "text", + "content": " [49], Character Profiling [154], and OpinionQA [107] integrate personalized data into LLMs through prompting for role-playing task, thereby aligning the model's responses with specified user profiles. Kang et al. [51] and Liu et al. [67]" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 83, + 95, + 491, + 232 + ], + "blocks": [ + { + "bbox": [ + 83, + 95, + 491, + 232 + ], + "lines": [ + { + "bbox": [ + 83, + 95, + 491, + 232 + ], + "spans": [ + { + "bbox": [ + 83, + 95, + 491, + 232 + ], + "type": "image", + "image_path": "684289db43c065be977860260a5ca5599fa5f676c9663e41befff2d9bcc9c089.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 245, + 376, + 256 + ], + "lines": [ + { + "bbox": [ + 195, + 245, + 376, + 256 + ], + "spans": [ + { + "bbox": [ + 195, + 245, + 376, + 256 + ], + "type": "text", + "content": "Fig. 4. Overview of the personalized generation stage." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 277, + 503, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 277, + 503, + 344 + ], + "spans": [ + { + "bbox": [ + 70, + 277, + 503, + 344 + ], + "type": "text", + "content": "integrate interaction histories into LLMs via prompting to predict user rating for candidate items. Cue-CoT [129] employs chain-of-thought reasoning to infer user needs from contextual cues, enabling personalized responses to in-depth dialogue questions. Additionally, TICL [26] proposes a trial-and-error framework that critiques initial LLM-generated responses, derives explanations and integrates these negative examples into prompts to improve personalization alignment." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 354, + 504, + 606 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 69, + 354, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 504, + 475 + ], + "type": "text", + "content": "(2). Summary-augmented Prompting. 
Direct integration of personalized information via prompting struggles with ambiguous intent signals: Lengthy interaction histories introduce noise that obscures critical behavioral patterns [69], while sparse behavioral data lacks sufficient context for LLMs to derive meaningful user preferences. To address these issues, recent approaches focus on summarizing user personalized intents and integrating them into prompts. For instance, GPG [158] extracts key user habits and preferences from personal contexts, enabling fine-grained personalization. Similarly, LLMs are employed to generate task-specific summaries of user preferences, enhancing retrieval-augmented personalized generation capabilities [101]. In recommendation systems, ONCE [70], LLMTreeRec [163], and KAR [145] leverage historical user-item interactions to summarize user preferences. Furthermore, Matryoshka [58] generates user preference summaries by dynamically retrieving and synthesizing historical data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 485, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 485, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 69, + 485, + 504, + 606 + ], + "type": "text", + "content": "(3). Adaptive Prompting. Manually designing personalized prompts demands both expert knowledge and significant labor, motivating the development of automated methods for personalized prompt generation. For example, Li et al. [57] trains a personalized prompt rewriter via supervised and reinforcement learning. RecGPT [166] and PEPLER-D [59] leverage prompt tuning to generate personalized prompts, enhancing sequential and explainable recommendations, respectively. GRAPA [94] integrates semantic and collaborative signals from user-item interaction graphs with graph neural networks to generate context-aware personalized prompts. SGPT [28] employs prompt tuning to jointly model common and group-specific patterns, bridging generalized and personalized federated learning paradigms. Furthermore, PFCL [152] achieves multi-granularity human preference modeling: coarse-grained prompts distill shared knowledge, while fine-grained prompts adapt to individual user characteristics." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 616, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 616, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 70, + 616, + 504, + 655 + ], + "type": "text", + "content": "4.3.3 Generation from Implicit Preferences. Unlike explicit preference modeling, which captures user preferences through textual input, implicit preference-based methods incorporate personalization through internal parameters. 
This personalization is achieved either through Parameter-Efficient Fine-tuning (PEFT) techniques, such as LoRA [42]," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 96, + 541, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 96, + 541, + 122 + ], + "spans": [ + { + "bbox": [ + 107, + 96, + 541, + 122 + ], + "type": "text", + "content": "or reinforcement learning-based approaches for preference alignment [20, 57]. Based on these strategies, we classify existing methods into two categories: (1) Fine-tuning-Based Methods and (2) Reinforcement Learning-Based Methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 132, + 541, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 132, + 541, + 253 + ], + "spans": [ + { + "bbox": [ + 107, + 132, + 541, + 253 + ], + "type": "text", + "content": "(1). Fine-tuning Based Methods. For fine-tuning methods, LoRA is the most widely adopted since it is resource-efficient and enables rapid adaptation without compromising model performance. PLoRA [165] introduces a personalized knowledge integration framework that combines task-specific LoRA with user-specific knowledge. Similarly, LM-P [142] personalizes information via LoRA by incorporating User ID as a personalization factor. MiLP [165] employs Bayesian optimization to determine the optimal personalization injection configuration, including LoRA settings, to effectively capture and utilize user-specific information. OPPU [122] and PER-PCS [120] follow a similar approach, leveraging user history data for fine-tuning LoRA-based personalization. However, PER-PCS differs by incorporating a gating module that selects the appropriate LoRA, enabling fine-grained personalization. Additionally, Review-LLM [91] integrates LoRA for supervised fine-tuning in the task of personalized review generation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 255, + 541, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 255, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 107, + 255, + 541, + 335 + ], + "type": "text", + "content": "Beyond LoRA-based approaches, alternative pipelines have been proposed for personalized generation. UserIdentifier [78] introduces a user-specific identifier, significantly reducing training costs while enhancing personalized demonstration. UserAdapter [171] proposes user-independent prefix embeddings, leveraging prefix tuning for personalization. 
Meanwhile, HYDRA [175] achieves implicit personalization by training user-specific headers. Recently, researchers have also explored fine-tuning personalized model on edge devices [90] and collaborative learning between small and large language models to enable more personalized generation [161]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 346, + 541, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 541, + 550 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 541, + 550 + ], + "type": "text", + "content": "(2). Reinforcement Learning Based Methods. Apart from fine-tuning based methods, recent research has explored reinforcement learning based techniques to personalize text generation by aligning outputs with user preferences. P-RLHF [62] has been proposed to jointly learn a user-specific and reward model to enable text generation that aligns with a user's styles or criteria. P-SOUPS [47] models multiple user preferences as a Multi-Objective Reinforcement Learning (MORL) problem, decomposing preferences into multiple dimensions, each trained independently. PAD [20] aligns text generation with human preferences during inference by utilizing token-level personalized rewards to guide the decoding process. REST-PG [104] introduces a framework that trains large language models to reason over personal data during response generation. This approach first generates reasoning paths to enhance the LLM's reasoning ability and then employs Expectation-Maximization Reinforced Self-Training to iteratively refine the model based on its high-reward outputs. Additionally, Salemi et al. [103] incorporate reinforcement learning into the RAG pipeline to improve retrieval accuracy, thereby enhancing the personalization of generated content. Other applications include RewriterSIRI [57], which has been introduced to generate text via RL-based personalized prompt rewriting using API-based LLMs, and Kulkarni et al. [54], who explore the use of reinforcement learning to optimize RAG for improving the relevance and coherence of chatbot responses in specialized domains, ultimately enhancing user satisfaction and engagement." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 561, + 541, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 561, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 107, + 561, + 541, + 654 + ], + "type": "text", + "content": "4.3.4 Discussion. Personalized generation can be adopted via both explicit and implicit preference injection, yet they exhibit distinct characteristics that make them suitable for different scenarios. In explicit preference-based generation, personalization is clearly defined through user profile descriptions, contextual information, and similar inputs, which are incorporated into generators via prompts. A key advantage of this approach is explainability, as the personalized information is explicitly provided and easily traceable. Despite leveraging provided preferences and internal knowledge, explicit preference injection's personalization is constrained by model capabilities and irrelevant information interference. 
In contrast, implicit preference-based generation internalizes personalized information into" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 665 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 665 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 94, + 500, + 388 + ], + "blocks": [ + { + "bbox": [ + 75, + 94, + 500, + 388 + ], + "lines": [ + { + "bbox": [ + 75, + 94, + 500, + 388 + ], + "spans": [ + { + "bbox": [ + 75, + 94, + 500, + 388 + ], + "type": "image", + "image_path": "c1f227eab52914058e52cd6428ef57f5faa6d829581489e901866e44230f1223.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 397, + 414, + 408 + ], + "lines": [ + { + "bbox": [ + 160, + 397, + 414, + 408 + ], + "spans": [ + { + "bbox": [ + 160, + 397, + 414, + 408 + ], + "type": "text", + "content": "Fig. 5. Overview of transition from personalized RAG to personalized agent." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 440, + 503, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 440, + 503, + 492 + ], + "spans": [ + { + "bbox": [ + 70, + 440, + 503, + 492 + ], + "type": "text", + "content": "the generator's parameters through scene-specific personalized data, thereby adapting the model for more fine-grained personalization. However, these methods typically incur substantial training and computational costs, as they require fine-tuning the generator's internal parameters. Therefore, selecting between these approaches should be guided by the specific application scenario and resource constraints." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 509, + 174, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 509, + 174, + 520 + ], + "spans": [ + { + "bbox": [ + 70, + 509, + 174, + 520 + ], + "type": "text", + "content": "4.4 From RAG to Agent" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 525, + 503, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 525, + 503, + 593 + ], + "spans": [ + { + "bbox": [ + 69, + 525, + 503, + 593 + ], + "type": "text", + "content": "4.4.1 Definition. A personalized LLM-based agent is a system designed to dynamically incorporate user context, memory, and external tools or APIs to support highly personalized and goal-oriented interactions [19, 45, 146], and solve problems in a goal-oriented manner [63, 113]. 
From the previously introduced stages of RAG, we observe that the evolution of personalized RAG reveals a structural convergence with agent architectures. We analyze them from three key perspectives:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 601, + 503, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 601, + 503, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 601, + 503, + 654 + ], + "type": "text", + "content": "- Personalized Understanding: This phase within the agent parallels the query understanding and rewriting process of RAG as outlined in Section 4.1. However, it extends beyond static semantic parsing by incorporating dynamic user profiling [139] and role modeling [110]. This integration enables the agent to dynamically align interactions with implicit user preferences, facilitating personalized responses and task-specific adaptations [96]." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 96, + 539, + 232 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 107, + 96, + 538, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 96, + 538, + 162 + ], + "spans": [ + { + "bbox": [ + 107, + 96, + 538, + 162 + ], + "type": "text", + "content": "- Personalized Planning and Execution: This phase in agents mirrors RAG's retrieval process in Section 4.2 yet it advances beyond static document retrieval by incorporating real-time memory management [87] and sophisticated tool and API calling [127]. This approach ensures the dynamic alignment of external knowledge with personalized constraints, such as integrating medical history in healthcare agents [2], to deliver context-aware and user-specific outcomes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 165, + 539, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 165, + 539, + 232 + ], + "spans": [ + { + "bbox": [ + 107, + 165, + 539, + 232 + ], + "type": "text", + "content": "- Personalized Generation: This phase in agents mirrors RAG's generative process in Section 4.3 but transcends static template-based generation by integrating user preference and fact alignment. Agents dynamically enforce user preferences and ensure fact consistency through role-specific mechanisms (e.g., social adaptability in conversational agents [2]), enabling outputs to evolve in harmony with personalized and situational constraints rather than relying solely on predefined generative frameworks." 
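To make the three-phase convergence sketched above more concrete, the following is a minimal, illustrative Python sketch of a personalized agent loop (personalized understanding, then planning/execution over memory and tools, then generation). All class, function, and parameter names here (e.g., PersonalizedAgent, UserProfile, plan_and_execute) are hypothetical stand-ins introduced only for illustration; they are not APIs from any cited framework, and the LLM call and tool are replaced by trivial placeholders so the example runs on its own.

```python
# Illustrative sketch only: mirrors the survey's conceptual flow
# (understanding -> planning/execution -> generation), not any cited system.
from dataclasses import dataclass, field
from typing import Callable, Dict, List


@dataclass
class UserProfile:
    """Explicit preference signals the agent can condition on."""
    user_id: str
    preferences: Dict[str, str] = field(default_factory=dict)
    history: List[str] = field(default_factory=list)


class PersonalizedAgent:
    def __init__(self, generate: Callable[[str], str],
                 tools: Dict[str, Callable[[str], str]]):
        self.generate = generate      # stand-in for an LLM call
        self.tools = tools            # stand-in for external tool/API calls
        self.memory: List[str] = []   # persistent memory replacing a static index

    def understand(self, profile: UserProfile, query: str) -> str:
        # Personalized understanding: enrich the raw query with profile cues.
        prefs = "; ".join(f"{k}={v}" for k, v in profile.preferences.items())
        return f"[user {profile.user_id} | {prefs}] {query}"

    def plan_and_execute(self, intent: str) -> List[str]:
        # Personalized planning/execution: recall memory, then call tools.
        recalled = [m for m in self.memory if any(w in m for w in intent.split())]
        observations = [tool(intent) for tool in self.tools.values()]
        return recalled + observations

    def respond(self, profile: UserProfile, query: str) -> str:
        intent = self.understand(profile, query)
        evidence = self.plan_and_execute(intent)
        prompt = f"{intent}\nEvidence: {evidence}\nAnswer in the user's preferred style."
        answer = self.generate(prompt)
        self.memory.append(f"Q: {query} -> A: {answer}")  # persist across turns
        return answer


if __name__ == "__main__":
    agent = PersonalizedAgent(
        generate=lambda p: f"(draft answer conditioned on) {p[:60]}...",
        tools={"search": lambda q: f"search results for '{q[:30]}...'"},
    )
    alice = UserProfile("alice", {"tone": "concise"}, ["likes hiking"])
    print(agent.respond(alice, "plan a weekend trip"))
```

The point of the sketch is only the control flow: profile signals condition understanding, memory and tool observations condition planning, and the updated memory persists across turns, which is what separates this loop from one-shot retrieval.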
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 238, + 539, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 238, + 539, + 304 + ], + "spans": [ + { + "bbox": [ + 107, + 238, + 539, + 304 + ], + "type": "text", + "content": "In general we frame agent architectures as \"personalized RAG++\", where persistent memory [137] replaces static indexes, and tool APIs [17] serve as dynamic knowledge connectors, enabling complicated, human-aligned interactions beyond one-shot retrieval, as shown in Figure 5. This progression highlights that as RAG systems incorporate deeper personalization—requiring user-state tracking, adaptive tool usage, and context-aware generation, they inherently adopt agent-like capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 316, + 539, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 316, + 539, + 381 + ], + "spans": [ + { + "bbox": [ + 107, + 316, + 539, + 381 + ], + "type": "text", + "content": "4.4.2 Personalized Understanding. Personalized understanding refers to an agent's ability to accurately interpret user inputs by integrating user intent recognition and contextual analysis. This process ensures interactions that are both meaningful and contextually appropriate. The rationale behind this classification lies in its capacity to address three core aspects of understanding: recognizing user intent, analyzing context, and leveraging user profiles. Each of these aspects plays a distinct role in improving the agent's performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 392, + 539, + 654 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 392, + 538, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 392, + 538, + 486 + ], + "spans": [ + { + "bbox": [ + 107, + 392, + 538, + 486 + ], + "type": "text", + "content": "(1). User-profile Understanding. In user-profile understanding, an agent's personalized ability primarily depends on its capacity to accurately model and understand the user's preferences, context, and intentions. Xu et al. [148] proposes a framework in which LLMs are designed to understand the physical world, thereby facilitating a deeper connection between the agent and its environment, which is essential for accurate task execution. Abbasian et al. [2] further expands this understanding by emphasizing the importance of personalization in health agents, where the user's profile directly influences the behavior and decisions of the agent. This user understanding is foundational to ensuring that the AI agent performs tasks in a way that aligns with individual user needs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 498, + 538, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 538, + 605 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 538, + 605 + ], + "type": "text", + "content": "(2). Role Understanding. In agent's role understanding, the role of the agent within these environments is also crucial. Recent studies focus on enhancing role-playing capabilities within LLMs. Wang et al. [139] introduce RoleLLM, a benchmark that aims to elicit and refine the role-playing abilities of LLMs, demonstrating how role understanding influences agent performance in conversational tasks. Similarly, Shao et al. 
[110] present Character-LLM, a trainable agent framework for role-playing, which tailors its responses based on predefined roles. Wang et al. [134] introduce a method for evaluating personality fidelity in role-playing agents through psychological interviews, aiming to enhance the realism and consistency of AI-driven characters. This role understanding allows for more contextually appropriate interactions, increasing the relevance and utility of AI agents across various applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 616, + 539, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 539, + 654 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 539, + 654 + ], + "type": "text", + "content": "(3). User-role Joint Understanding. In agent's user-role joint understanding, the intersection of user and role understanding is explored through frameworks that evaluate and enhance the social and personality aspects of LLMs. SocialBench Chen et al. [18] provides a sociality evaluation framework for role-playing agents. Dai et al. [27], and" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 96, + 503, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 96, + 503, + 162 + ], + "spans": [ + { + "bbox": [ + 70, + 96, + 503, + 162 + ], + "type": "text", + "content": "[96] extend this by incorporating multi-modal data and personality-indicative information, respectively, which allows agents to better adapt to both user and role understanding in dynamic environments. Furthermore, Wang et al. [126] offers a perspective on how role and environment understanding can improve user experience. Tu et al. [123] contribute by providing a benchmark specifically for evaluating role-playing agents in the Chinese context, adding a cultural dimension to role understanding. Finally, Neeko [153] further advances role-based interactions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 175, + 504, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 175, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 70, + 175, + 504, + 242 + ], + "type": "text", + "content": "4.4.3 Personalized Planning and Execution. Personalized planning and execution refer to the process of designing and implementing strategies or actions that are specifically tailored to an individual's unique context, and goals [44, 87, 114, 159]. 
It requires agents to dynamically integrate long-term memory, real-time reasoning, and external tool utilization [40, 41, 169], as demonstrated in healthcare decision support [2] and travel planning scenarios [17]. We analyze two fundamental components that enable this personalization in the following." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 253, + 504, + 508 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 70, + 253, + 504, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 253, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 70, + 253, + 504, + 374 + ], + "type": "text", + "content": "(1). Memory Management. Effective memory systems allow agents to integrate users' historical preferences, behavioral patterns, and contextual habits, enhancing their ability to make planning and tailor interactions to user-specific needs [17, 127, 136]. The EMG-RAG framework [137] combines editable memory graphs with retrieval-augmented generation to maintain dynamic user profiles, while Park et al. [87] implements memory streams and periodic reflection mechanisms to simulate human-like behavior. In healthcare applications, Abbasian et al. [2] integrates multimodal user data through specialized memory modules to optimize treatment recommendations. For recommendation systems, RecAgent [133] employs hierarchical memory structures to model user interaction patterns across multiple domains. Recent advances like TravelPlanner+ [114] demonstrate how memory-augmented LLMs achieve higher relevance in personalized itinerary generation compared to generic planners." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 386, + 504, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 386, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 70, + 386, + 504, + 508 + ], + "type": "text", + "content": "(2). Tool and API Calling. The integration of external tools expands agents' capabilities beyond pure linguistic reasoning, enabling agents to interact with users and perform personalized tasks [17, 126, 127, 148, 159]. For instance, VOYAGER [127] establishes a paradigm for lifelong skill acquisition through automatic API curriculum learning and skill library construction. In robotics, Zhang et al. [159] develops a bootstrapping framework where LLMs guide robots in tool-mediated skill discovery, enabling a high success rate in novel object manipulation tasks. The PUMA framework [17] demonstrates how personalized web agents can achieve performance gains in e-commerce tasks through adaptive API orchestration. For mobile interaction, Wang et al. [126] implements few-shot tool learning to handle diverse UI operations with minimal training data. These approaches highlight the importance of tool grounding mechanisms [44] that translate linguistic plans into executable API sequences while maintaining personalization constraints." 
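As a concrete complement to the memory-management discussion above, the sketch below shows one common recall pattern for user-specific memories: combine a recency decay with a relevance score and keep the top-k records. The 0.5/0.5 weighting, the one-day half-life, and the lexical-overlap relevance function are illustrative assumptions only; they are not the scoring used by the cited systems, which typically rely on embedding similarity and learned or reflective components.

```python
# Illustrative memory-recall sketch (recency + relevance scoring), not the
# implementation of any cited memory framework.
import math
import time
from dataclasses import dataclass
from typing import List


@dataclass
class MemoryRecord:
    text: str
    timestamp: float  # seconds since epoch


def recency(record: MemoryRecord, now: float, half_life_s: float = 86_400.0) -> float:
    """Exponentially decay older memories (half-life of one day by default)."""
    return math.exp(-math.log(2) * (now - record.timestamp) / half_life_s)


def relevance(record: MemoryRecord, query: str) -> float:
    """Cheap lexical overlap as a stand-in for embedding similarity."""
    q, m = set(query.lower().split()), set(record.text.lower().split())
    return len(q & m) / (len(q) or 1)


def recall(memories: List[MemoryRecord], query: str, k: int = 3) -> List[MemoryRecord]:
    """Return the k memories with the highest combined recency/relevance score."""
    now = time.time()
    scored = sorted(
        memories,
        key=lambda r: 0.5 * recency(r, now) + 0.5 * relevance(r, query),
        reverse=True,
    )
    return scored[:k]


if __name__ == "__main__":
    mems = [
        MemoryRecord("user prefers vegetarian restaurants", time.time() - 3600),
        MemoryRecord("user booked a flight to Tokyo last month", time.time() - 30 * 86_400),
        MemoryRecord("user asked about museum opening hours", time.time() - 600),
    ]
    for r in recall(mems, "recommend a restaurant in Tokyo"):
        print(r.text)
```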
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 510, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 510, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 70, + 510, + 504, + 589 + ], + "type": "text", + "content": "This synthesis highlights that modern agent systems achieve enhanced personalization through two primary strategies: 1) Memory-augmented architectures, which leverage editable memory graphs [137], reflection mechanisms [87], and hierarchical memory structures [133] to dynamically adapt to user preferences across various domains; and 2) Tool and API integration, which expand agent capabilities by balancing generalization with specialization. Future work may explore improving the contextual relevance and adaptability of memory systems while optimizing real-time tool interaction for seamless task execution." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 601, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 601, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 601, + 504, + 654 + ], + "type": "text", + "content": "4.4.4 Personalized Generation. Based on the foundation of personalized planning and execution mechanisms, which enable agents to adapt strategies to user-specific contexts [44, 159], the next critical concern lies in personalized generation. This capability ensures that generated outputs not only align with factual correctness but also resonate with users' unique preferences, personality traits, and situational needs. Personalized generation bridges the gap between" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 96, + 539, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 96, + 539, + 122 + ], + "spans": [ + { + "bbox": [ + 107, + 96, + 539, + 122 + ], + "type": "text", + "content": "adaptive reasoning and human-aligned outcomes, allowing agents to produce contextually relevant and emotionally appropriate responses." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 140, + 542, + 348 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 107, + 140, + 541, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 140, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 140, + 541, + 262 + ], + "type": "text", + "content": "(1). Alignment with User Fact. 
Alignment with User Fact emphasizes the accuracy, consistency, and factual grounding of personalized responses, ensuring they remain trustworthy across diverse user interactions. This is particularly challenging in personalized agents, where maintaining character authenticity while avoiding hallucinations requires balancing creativity with factual adherence. Recent advances address these challenges through improved training frameworks and evaluation metrics. For instance, Character-LLM [110] integrates memory-augmented architectures to reduce hallucinations while preserving character-specific traits. Wang et al. [135] investigate quantization effects on personality consistency in edge-deployed agents and stabilize outputs under computational constraints. Dai et al. [27] ensures multimodal consistency (text-image) in role-playing. These works highlight the importance of architectural innovations and rigorous evaluation in achieving reliability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 280, + 542, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 280, + 542, + 348 + ], + "spans": [ + { + "bbox": [ + 107, + 280, + 542, + 348 + ], + "type": "text", + "content": "(2). Alignment with User Preferences. Alignment with user preferences ensures that generated outputs reflect individualized personalities, values, and interaction styles. This requires agents to dynamically interpret implicit user cues and adapt responses accordingly. Wang et al. [139] benchmarks role-specific alignment. Ran et al. [96] improves personality fidelity via psychological scale datasets. Wang et al. [134] quantifies alignment via psychological interviews. Chen et al. [18] evaluates social adaptability in conversations." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 365, + 539, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 365, + 539, + 390 + ], + "spans": [ + { + "bbox": [ + 107, + 365, + 539, + 390 + ], + "type": "text", + "content": "4.4.5 Discussion. The architectural evolution from RAG to personalized agents introduces significant advancements in human-AI interaction but also surfaces critical challenges that warrant further investigation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 392, + 541, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 392, + 541, + 486 + ], + "spans": [ + { + "bbox": [ + 107, + 392, + 541, + 486 + ], + "type": "text", + "content": "Personalized Understanding, while enabling interpretation of user intent and context, faces limitations in real-time adaptability and generalization. Current approaches like RoleLLM [139] and Character-LLM [110] demonstrate robust role-specific comprehension but struggle with dynamic user state tracking, particularly when handling evolving preferences or multi-session interactions. Furthermore, cultural specificity in benchmarks like CharacterEval [123] reveals gaps in global applicability, as agents trained on region-specific data often fail to generalize across diverse sociocultural contexts. Future work could explore hybrid architectures that combine continuous learning mechanisms with privacy-preserving federated learning to address these adaptability constraints while maintaining user trust." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 489, + 541, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 489, + 541, + 583 + ], + "spans": [ + { + "bbox": [ + 107, + 489, + 541, + 583 + ], + "type": "text", + "content": "Personalized Planning and Execution, achieves remarkable task specialization through memory management and tool integration, yet suffers from scalability issues in complex environments. While frameworks like EMG-RAG [137] and VOYAGER [127] effectively manage user-specific constraints, their reliance on predefined API taxonomies limits emergent tool discovery in novel scenarios. The \"cold-start\" problem persists in domains requiring rapid skill acquisition, as seen in healthcare applications [2], where delayed API responses can compromise decision-making efficacy. A promising direction involves developing meta-reasoning architectures that dynamically prioritize memory recall versus tool invocation based on situational urgency and confidence thresholds." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 585, + 541, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 585, + 541, + 652 + ], + "spans": [ + { + "bbox": [ + 107, + 585, + 541, + 652 + ], + "type": "text", + "content": "Personalized Generation balances factual accuracy with preference alignment but risks over-fitting, where excessive finetuning to user profiles may reinforce cognitive biases. Techniques address surface-level alignment but lack mechanisms for ethical boundary detection. For instance, agents might inadvertently propagate harmful stereotypes when mirroring user preferences without critical oversight. Future systems could integrate value-aligned reinforcement learning with human-in-the-loop validation to preserve authenticity while preventing detrimental customization." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 73, + 110, + 504, + 387 + ], + "blocks": [ + { + "bbox": [ + 181, + 94, + 391, + 104 + ], + "lines": [ + { + "bbox": [ + 181, + 94, + 391, + 104 + ], + "spans": [ + { + "bbox": [ + 181, + 94, + 391, + 104 + ], + "type": "text", + "content": "Table 2. Datasets and metrics for personalized RAG and Agent." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 73, + 110, + 504, + 387 + ], + "lines": [ + { + "bbox": [ + 73, + 110, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 73, + 110, + 504, + 387 + ], + "type": "table", + "html": "
Field | Metrics Category | Metrics | Datasets
Pre-retrieval | Textual Quality | BLEU, ROUGE, EM | Avocado Research Email Collection [57, 85], Amazon review [57, 83], Reddit comments [57, 118], Amazon ESCI dataset [82, 97], PIP
Pre-retrieval | Information Retrieval | MAP, MRR, NDCG, Precision, Recall, RBP | AOL [88, 174], WARRIORS [99], Personalized Results Re-Ranking benchmark [6], delicio.us [9, 15, 144, 172], Flickr [9, 108], CiteULike [10, 14], LRDP [12], Delicious [141], Bibsonomy [79], Wikipedia [8, 33]
Pre-retrieval | Classification | Accuracy, Macro-F1 | SCAN [56, 173], AITA WORKSM [53, 80], Robust04 [61]
Pre-retrieval | Others | XEntropy, PMS, Image-Align, PQEC, ProfOverlap | Amazon ESCI dataset [82, 97], PIP, Bibsonomy [79]
Retrieval | Textual Quality | BLEU, ROUGE, Dis, PPL | TOPDIAL [130], Pchatbot [93], DuLemon [150]
Retrieval | Information Retrieval | Recall, MRR, Precision, F1 | LiveChat [34], Pchatbot [93], DuLemon [150]
Retrieval | Classification | Accuracy, Succ | TOPDIAL [130], PersonalityEvid [119], DuLemon [150], PersonalityEdit [75]
Retrieval | Others | Fluency, Coherence, Plausibility, ES, DD, TPEI, PAE | PersonalityEvid [119], PersonalityEdit [75]
Generation | Textual Quality | BLEU, ROUGE, Dis, PPL, METEOR | LaMP [105], Long LaMP [55], Dulemon [150], PGraphRAG [5], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162]
Generation | Classification | Accuracy, F1, Persona F1 | LaMP [105], Long LaMP [55], Dulemon [150], AmazonQA/Products [29], Reddit [170], MedicalDialogue [162]
Generation | Regression | MAE, RMSE | LaMP [105], Long LaMP [55], PGraphRAG [5]
Generation | Others | Fluency, Mean Success Rate, Median Relative Improvements | Personalized-Gen [3]
Agent | Textual Quality | BLEU, ROUGE, METEOR, CIDEr, EM, Fluency, Coherence, Instruction Adherence, Consistency-related metrics | RICO [126], RoleBench [139], Shao et al. [110], Socialbench [18], MMRole-Data [27], ROLEPERSONALITY [96], ChatHarui [134], Character-LLM-Data [153], Knowledge Behind Persona [41], Wang et al. [137], Wang et al. [135], Zheng et al. [169]
Agent | Information Retrieval | Recall, F1, Precision | Knowledge Behind Persona [41]
Agent | Classification | Accuracy, Failure Rate, Classification Accuracy, Preference Rate, Correctness | MIT-BIH Arrhythmia Database [148], VirtualHome [44], Socialbench [18], ARC [100], AGIEval [100], HellaSwag [100], MedMCQA [100], AQUA-RAT [100], LogiQA [100], LSAT-AR [100], LSAT-LR [100], LSAT-RC [100], SAT-English [100], SAT-Math [100], PersonalWAB [17], TravelPlanner+ [114]
Agent | Others | Pass@k, Executability, Productivity, Plausibility of the Story | Hong et al. [40], Zheng et al. [169]
", + "image_path": "d86abcf5e5ec9f4d1a1e2cf178e6e2a2411500ea7111c2031609d3b0586d7569.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 404, + 207, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 404, + 207, + 414 + ], + "spans": [ + { + "bbox": [ + 70, + 404, + 207, + 414 + ], + "type": "text", + "content": "5 EVALUATION AND DATASET" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 421, + 503, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 421, + 503, + 570 + ], + "spans": [ + { + "bbox": [ + 70, + 421, + 503, + 570 + ], + "type": "text", + "content": "In the evolving landscape of personalization, from RAG to advanced Agent-based systems, the evaluation of models relies heavily on diverse datasets and metrics tailored to specific tasks. This survey categorizes metrics into several key types: Textual Quality metrics (e.g., BLEU, ROUGE, METEOR) assess the fluency and coherence of generated outputs; Information Retrieval metrics (e.g., MAP, MRR, Recall) evaluate the accuracy and relevance of retrieved information; Classification metrics (e.g., Accuracy, F1) measure task-specific correctness; Regression metrics (e.g., MAE, RMSE) quantify prediction errors; and Other metrics (e.g., Fluency, Pass@k) address domain-specific or task-unique aspects like plausibility or executability. These metrics span pre-retrieval, retrieval, generation, and agent-based personalization approaches, reflecting their varied objectives. To provide a comprehensive overview, we compile an extensive list of datasets across these fields, as detailed in Table 2. These datasets, paired with their respective metrics, enable researchers to benchmark and refine personalized systems, from enhancing query rewriting to enabling autonomous agents in physical and virtual environments." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 582, + 263, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 582, + 263, + 593 + ], + "spans": [ + { + "bbox": [ + 70, + 582, + 263, + 593 + ], + "type": "text", + "content": "6 CHALLENGES AND FUTURE DIRECTIONS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 599, + 501, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 599, + 501, + 624 + ], + "spans": [ + { + "bbox": [ + 70, + 599, + 501, + 624 + ], + "type": "text", + "content": "Personalized RAG and agent-based systems still face several critical challenges that warrant further exploration. 
We list them as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 630, + 501, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 630, + 501, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 630, + 501, + 654 + ], + "type": "text", + "content": "- Balancing Personalization and Scalability: Integrating personalization data (such as preferences, history, and contextual signals) into RAG processes often increases computational complexity, making it difficult to maintain" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 501, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 501, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 501, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 114, + 96, + 539, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 96, + 539, + 121 + ], + "spans": [ + { + "bbox": [ + 114, + 96, + 539, + 121 + ], + "type": "text", + "content": "efficiency and scalability across large-scale systems. Future work could explore lightweight, adaptive embeddings and hybrid frameworks that seamlessly fuse user profiles with real-time contexts." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 124, + 541, + 355 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 107, + 124, + 541, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 124, + 541, + 177 + ], + "spans": [ + { + "bbox": [ + 107, + 124, + 541, + 177 + ], + "type": "text", + "content": "- Evaluating Personalization Effectively: Current metrics like BLEU, ROUGE, and human evaluations fall short in capturing the nuanced alignment of outputs with dynamic user preferences, lacking tailored measures for personalization efficacy. Developing specialized benchmarks and metrics that assess long-term user satisfaction and adaptability is crucial for real-world applicability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 178, + 541, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 178, + 541, + 245 + ], + "spans": [ + { + "bbox": [ + 107, + 178, + 541, + 245 + ], + "type": "text", + "content": "- Preserving Privacy through Device-Cloud Collaboration: Personalized retrieval often involves processing sensitive user data, raising privacy concerns, especially with the increased global emphasis on data protection regulations, such as the European Union's General Data Protection Regulation (GDPR). 
Consequently, a promising approach is the collaborative integration of on-device small Language models which handle sensitive personal data locally, with cloud-based LLM, which provides broader contextual knowledge." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 247, + 541, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 247, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 107, + 247, + 541, + 312 + ], + "type": "text", + "content": "- Personalized Agent Planning: Current research on agent planning remains mainly in its early stages, with much of the work focusing on building foundational frameworks such as GUI agents [81] and the application of agents across diverse domains [131]. Notably, the incorporation of personalized approaches has yet to be widely adopted. Exploring how to integrate personalized support into existing frameworks to enhance user experience represents a promising and valuable direction for future investigation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 316, + 541, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 316, + 541, + 355 + ], + "spans": [ + { + "bbox": [ + 107, + 316, + 541, + 355 + ], + "type": "text", + "content": "- Ensuring Ethical and Coherent Systems: Bias in data processing, privacy concerns in user profiling, and coherence across retrieval and generation stages remain unresolved. Future directions should prioritize ethical safeguards, privacy-preserving techniques, and cross-stage optimization to build trustworthy, unified personalized systems." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 108, + 371, + 185, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 371, + 185, + 381 + ], + "spans": [ + { + "bbox": [ + 108, + 371, + 185, + 381 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 388, + 541, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 388, + 541, + 482 + ], + "spans": [ + { + "bbox": [ + 107, + 388, + 541, + 482 + ], + "type": "text", + "content": "In this paper, we explore the landscape of personalization from Retrieval-Augmented Generation (RAG) to advanced LLM-based Agents, detailing adaptations across pre-retrieval, retrieval, and generation stages while extending into agentic capabilities. By reviewing recent literature, datasets, and metrics, we highlight the progress and diversity in enhancing user satisfaction through tailored AI systems. However, challenges such as scalability, effective evaluation, and ethical concerns underscore the need for innovative solutions. Future research should focus on lightweight frameworks, specialized benchmarks, and privacy-preserving techniques to advance personalized AI. Relevant papers and resources are also compiled online for ease of future research." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 499, + 167, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 499, + 167, + 509 + ], + "spans": [ + { + "bbox": [ + 108, + 499, + 167, + 509 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 514, + 540, + 653 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 114, + 514, + 492, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 492, + 523 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 492, + 523 + ], + "type": "text", + "content": "[1] 2021. BERT: a review of applications in natural language processing and understanding. arXiv preprint arXiv:2103.11943 (2021)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 524, + 539, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 524, + 539, + 542 + ], + "spans": [ + { + "bbox": [ + 114, + 524, + 539, + 542 + ], + "type": "text", + "content": "[2] Mahyar Abbasian, Iman Azimi, Amir M Rahmani, and Ramesh Jain. 2023. Conversational health agents: A personalized llm-powered agent framework. arXiv preprint arXiv:2310.02374 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 544, + 539, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 544, + 539, + 563 + ], + "spans": [ + { + "bbox": [ + 114, + 544, + 539, + 563 + ], + "type": "text", + "content": "[3] Bashar Alhafni, Vivek Kulkarni, Dhruv Kumar, and Vipul Raheja. 2024. Personalized Text Generation with Fine-Grained Linguistic Control. In Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024). 88–101." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 563, + 427, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 563, + 427, + 573 + ], + "spans": [ + { + "bbox": [ + 115, + 563, + 427, + 573 + ], + "type": "text", + "content": "[4] Amazon. [n.d.]. Amazon Customer Review Dataset. Online dataset. https://nijianmo.github.io/amazon/" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 574, + 540, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 574, + 540, + 601 + ], + "spans": [ + { + "bbox": [ + 115, + 574, + 540, + 601 + ], + "type": "text", + "content": "[5] Steven Au, Cameron J Dimacali, Ojasmitha Pedirappagari, Namyong Park, Franck Dernoncourt, Yu Wang, Nikos Kanakaris, Hanieh Deilamsalehy, Ryan A Rossi, and Nesreen K Ahmed. 2025. Personalized Graph-Based Retrieval for Large Language Models. arXiv preprint arXiv:2501.02157 (2025)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 603, + 539, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 603, + 539, + 622 + ], + "spans": [ + { + "bbox": [ + 115, + 603, + 539, + 622 + ], + "type": "text", + "content": "[6] Elias Bassani, Pranav Kasela, Alessandro Raganato, and Gabriella Pasi. 2022. A multi-domain benchmark for personalized search evaluation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3822-3827." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 624, + 539, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 624, + 539, + 642 + ], + "spans": [ + { + "bbox": [ + 115, + 624, + 539, + 642 + ], + "type": "text", + "content": "[7] Elias Bassani, Nicola Tonellotto, and Gabriella Pasi. 2023. Personalized query expansion with contextual word embeddings. ACM Transactions on Information Systems 42, 2 (2023), 1-35." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 643, + 527, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 643, + 527, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 643, + 527, + 653 + ], + "type": "text", + "content": "[8] Oliver Baumann and Mirco Schoenfeld. 2024. PSQE: Personalized Semantic Query Expansion for user-centric query disambiguation. (2024)." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 538, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 98, + 503, + 645 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 78, + 98, + 503, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 98, + 503, + 126 + ], + "spans": [ + { + "bbox": [ + 78, + 98, + 503, + 126 + ], + "type": "text", + "content": "[9] Matthias Bender, Tom Crecelius, Mouna Kacimi, Sebastian Michel, Thomas Neumann, Josiane Xavier Parreira, Ralf Schenkel, and Gerhard Weikum. 2008. Exploiting social relations for query expansion and result ranking. In 2008 IEEE 24th International Conference on Data Engineering Workshop. IEEE, 501-506." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 127, + 503, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 127, + 503, + 147 + ], + "spans": [ + { + "bbox": [ + 75, + 127, + 503, + 147 + ], + "type": "text", + "content": "[10] Marin Bertier, Rachid Guerraoui, Vincent Leroy, and Anne-Marie Kermarrec. 2009. Toward personalized query expansion. In Proceedings of the Second ACM EuroSys Workshop on Social Network Systems. 7-12." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 148, + 503, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 148, + 503, + 167 + ], + "spans": [ + { + "bbox": [ + 75, + 148, + 503, + 167 + ], + "type": "text", + "content": "[11] Keping Bi, Qingyao Ai, and W Bruce Croft. 2021. Learning a fine-grained review-based transformer model for personalized product search. 
In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 123-132." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 167, + 503, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 167, + 503, + 186 + ], + "spans": [ + { + "bbox": [ + 75, + 167, + 503, + 186 + ], + "type": "text", + "content": "[12] Claudio Biancalana and Alessandro Micarelli. 2009. Social tagging in query expansion: A new way for personalized web search. In 2009 International Conference on Computational Science and Engineering, Vol. 4. IEEE, 1060-1065." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 187, + 282, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 187, + 282, + 197 + ], + "spans": [ + { + "bbox": [ + 75, + 187, + 282, + 197 + ], + "type": "text", + "content": "[13] Microsoft Bing. [n.d]. Bing Search Engine. https://www.bing.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 198, + 503, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 198, + 503, + 217 + ], + "spans": [ + { + "bbox": [ + 75, + 198, + 503, + 217 + ], + "type": "text", + "content": "[14] Mohamed Reda Bouadjenek, Hakim Hacid, and Mokrane Bouzeghoub. 2019. Personalized social query expansion using social annotations. Transactions on Large-Scale Data-and Knowledge-Centered Systems XL (2019), 1-25." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 217, + 503, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 217, + 503, + 246 + ], + "spans": [ + { + "bbox": [ + 75, + 217, + 503, + 246 + ], + "type": "text", + "content": "[15] Mohamed Reda Bouadjenek, Hakim Hacid, Mokrane Bouzeghoub, and Johann Daigremont. 2011. Personalized social query expansion using social bookmarking systems. In Proceedings of the 34th international ACM SIGIR conference on Research and development in Information Retrieval. 1113-1114." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 247, + 486, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 247, + 486, + 256 + ], + "spans": [ + { + "bbox": [ + 75, + 247, + 486, + 256 + ], + "type": "text", + "content": "[16] Domenico Bulfamante. 2023. Generative enterprise search with extensible knowledge base using ai. Ph.D. Dissertation. Politecnico di Torino." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 258, + 503, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 258, + 503, + 276 + ], + "spans": [ + { + "bbox": [ + 75, + 258, + 503, + 276 + ], + "type": "text", + "content": "[17] Hongru Cai, Yongqi Li, Wenjie Wang, ZHU Fengbin, Xiaoyu Shen, Wenjie Li, and Tat-Seng Chua. [n. d]. Large Language Models Empowered Personalized Web Agents. In THE WEB CONFERENCE 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 75, + 277, + 503, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 277, + 503, + 296 + ], + "spans": [ + { + "bbox": [ + 75, + 277, + 503, + 296 + ], + "type": "text", + "content": "[18] Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. 2024. Socialbench: Sociality evaluation of role-playing conversational agents. arXiv preprint arXiv:2403.13679 (2024)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 75, + 297, + 503, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 297, + 503, + 316 + ], + "spans": [ + { + "bbox": [ + 75, + 297, + 503, + 316 + ], + "type": "text", + "content": "[19] Jiangjie Chen, Xintao Wang, Rui Xu, Siyu Yuan, Yikai Zhang, Wei Shi, Jian Xie, Shuang Li, Ruihan Yang, Tinghui Zhu, et al. 2024. From persona to personalization: A survey on role-playing language agents. arXiv preprint arXiv:2404.18231 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 75, + 317, + 503, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 317, + 503, + 336 + ], + "spans": [ + { + "bbox": [ + 75, + 317, + 503, + 336 + ], + "type": "text", + "content": "[20] Ruizhe Chen, Xiaotian Zhang, Meng Luo, Wenhao Chai, and Zuozhu Liu. 2024. Pad: Personalized alignment of llms at decoding-time. arXiv preprint arXiv:2410.04070 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 75, + 337, + 503, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 337, + 503, + 356 + ], + "spans": [ + { + "bbox": [ + 75, + 337, + 503, + 356 + ], + "type": "text", + "content": "[21] Weijie Chen, Ting Bai, Jinbo Su, Jian Luan, Wei Liu, and Chuan Shi. 2024. Kg-retriever: Efficient knowledge indexing for retrieval-augmented large language models. arXiv preprint arXiv:2412.05547 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 75, + 357, + 503, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 357, + 503, + 376 + ], + "spans": [ + { + "bbox": [ + 75, + 357, + 503, + 376 + ], + "type": "text", + "content": "[22] Zheng Chen, Ziyan Jiang, Fan Yang, Eunah Cho, Xing Fan, Xiaojiang Huang, Yanbin Lu, and Aram Galstyan. 2023. Graph meets LLM: A novel approach to collaborative filtering for robust conversational understanding. arXiv preprint arXiv:2305.14449 (2023)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 75, + 376, + 503, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 376, + 503, + 396 + ], + "spans": [ + { + "bbox": [ + 75, + 376, + 503, + 396 + ], + "type": "text", + "content": "[23] Hao Cheng, Shuo Wang, Wensheng Lu, Wei Zhang, Mingyang Zhou, Kezhong Lu, and Hao Liao. 2023. Explainable recommendation with personalized review retrieval and aspect learning. arXiv preprint arXiv:2306.12657 (2023)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 75, + 396, + 503, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 396, + 503, + 415 + ], + "spans": [ + { + "bbox": [ + 75, + 396, + 503, + 415 + ], + "type": "text", + "content": "[24] Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. 2023. Adapting language models to compress contexts. arXiv preprint arXiv:2305.14788 (2023)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 75, + 417, + 503, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 417, + 503, + 436 + ], + "spans": [ + { + "bbox": [ + 75, + 417, + 503, + 436 + ], + "type": "text", + "content": "[25] Eunah Cho, Ziyan Jiang, Jie Hao, Zheng Chen, Saurabh Gupta, Xing Fan, and Chenlei Guo. 2021. Personalized search-based query rewrite system for conversational ai. In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI. 179-188." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 75, + 437, + 503, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 437, + 503, + 456 + ], + "spans": [ + { + "bbox": [ + 75, + 437, + 503, + 456 + ], + "type": "text", + "content": "[26]Hyundong Cho, Karishma Sharma, Nicolaas Jedema, Leonardo FR Ribeiro, Alessandro Moschitti, Ravi Krishnan, and Jonathan May. 2025. TuningFree Personalized Alignment via Trial-Error-Explain In-Context Learning. arXiv preprint arXiv:2502.08972 (2025)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 75, + 456, + 503, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 456, + 503, + 475 + ], + "spans": [ + { + "bbox": [ + 75, + 456, + 503, + 475 + ], + "type": "text", + "content": "[27] Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. 2024. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 75, + 476, + 503, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 476, + 503, + 495 + ], + "spans": [ + { + "bbox": [ + 75, + 476, + 503, + 495 + ], + "type": "text", + "content": "[28] Wenlong Deng, Christos Thrampoulidis, and Xiaoxiao Li. 2024. Unlocking the potential of prompt-tuning in bridging generalized and personalized federated learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 6087-6097." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 75, + 496, + 503, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 496, + 503, + 515 + ], + "spans": [ + { + "bbox": [ + 75, + 496, + 503, + 515 + ], + "type": "text", + "content": "[29] Yang Deng, Yaliang Li, Wenxuan Zhang, Bolin Ding, and Wai Lam. 2022. Toward personalized answer generation in e-commerce via multiperspective preference modeling. ACM Transactions on Information Systems (TOIS) 40, 4 (2022), 1-28." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 75, + 516, + 503, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 516, + 503, + 535 + ], + "spans": [ + { + "bbox": [ + 75, + 516, + 503, + 535 + ], + "type": "text", + "content": "[30] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. 2024. The Faiss library. (2024). arXiv:2401.08281 [cs.LG]" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 75, + 536, + 264, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 536, + 264, + 545 + ], + "spans": [ + { + "bbox": [ + 75, + 536, + 264, + 545 + ], + "type": "text", + "content": "[31] ESPN. [n.d.]. ESPN Sports Statistics Dataset. Online dataset." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 75, + 546, + 503, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 546, + 503, + 574 + ], + "spans": [ + { + "bbox": [ + 75, + 546, + 503, + 574 + ], + "type": "text", + "content": "[32] Wenqi Fan, Yujuan Ding, Liangbo Ning, Shijie Wang, Hengyun Li, Dawei Yin, Tat-Seng Chua, and Qing Li. 2024. A survey on rag meeting llms: Towards retrieval-augmented large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6491-6501." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 75, + 576, + 503, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 576, + 503, + 594 + ], + "spans": [ + { + "bbox": [ + 75, + 576, + 503, + 594 + ], + "type": "text", + "content": "[33] Byron J Gao, David C Anastasiu, and Xing Jiang. 2010. Utilizing user-input contextual terms for query disambiguation. In Coling 2010: Posters. 329-337." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 75, + 596, + 503, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 596, + 503, + 615 + ], + "spans": [ + { + "bbox": [ + 75, + 596, + 503, + 615 + ], + "type": "text", + "content": "[34] Jingsheng Gao, Yixin Lian, Ziyi Zhou, Yuzhuo Fu, and Baoyuan Wang. 2023. LiveChat: A large-scale personalized dialogue dataset automatically constructed from live streaming. arXiv preprint arXiv:2306.08401 (2023)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 75, + 616, + 503, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 616, + 503, + 635 + ], + "spans": [ + { + "bbox": [ + 75, + 616, + 503, + 635 + ], + "type": "text", + "content": "[35] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, Haofen Wang, and Haofen Wang. 2023. Retrievalaugmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 2 (2023)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 75, + 635, + 251, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 635, + 251, + 645 + ], + "spans": [ + { + "bbox": [ + 75, + 635, + 251, + 645 + ], + "type": "text", + "content": "[36] Google. [n.d.]. Google Search. https://www.google.com" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 291, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 291, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 291, + 666 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 98, + 539, + 655 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 111, + 98, + 539, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 98, + 539, + 126 + ], + "spans": [ + { + "bbox": [ + 111, + 98, + 539, + 126 + ], + "type": "text", + "content": "[37] Jia-Chen Gu, Hui Liu, Zhen-Hua Ling, Quan Liu, Zhigang Chen, and Xiaodan Zhu. 2021. Partner matters! an empirical study on fusing personas for personalized response selection in retrieval-based chatbots. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 565-574." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 127, + 539, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 127, + 539, + 156 + ], + "spans": [ + { + "bbox": [ + 111, + 127, + 539, + 156 + ], + "type": "text", + "content": "[38] Jie Hao, Yang Liu, Xing Fan, Saurabh Gupta, Saleh Soltan, Rakesh Chada, Pradeep Natarajan, Chenlei Guo, and Gokhan Tur. 2022. CGF: Constrained generation framework for query rewriting in conversational AI. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track. 475-483." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 157, + 539, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 157, + 539, + 176 + ], + "spans": [ + { + "bbox": [ + 111, + 157, + 539, + 176 + ], + "type": "text", + "content": "[39] Nicola Henze, Peter Dolog, and Wolfgang Nejdl. 2004. Reasoning and ontologies for personalized e-learning in the semantic web. Journal of Educational Technology & Society 7, 4 (2004), 82-97." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 177, + 539, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 177, + 539, + 197 + ], + "spans": [ + { + "bbox": [ + 111, + 177, + 539, + 197 + ], + "type": "text", + "content": "[40] Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. 2023. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 3, 4 (2023), 6." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 198, + 539, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 198, + 539, + 226 + ], + "spans": [ + { + "bbox": [ + 111, + 198, + 539, + 226 + ], + "type": "text", + "content": "[41] WANG Hongru, Minda Hu, Yang Deng, Rui Wang, Fei Mi, Weichao Wang, Yasheng Wang, Wai-Chung Kwan, Irwin King, and Kam-Fai Wong. [n. d]. Large Language Models as Source Planner for Personalized Knowledge-grounded Dialogues. In The 2023 Conference on Empirical Methods in Natural Language Processing." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 227, + 539, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 227, + 539, + 247 + ], + "spans": [ + { + "bbox": [ + 111, + 227, + 539, + 247 + ], + "type": "text", + "content": "[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. 2022. Lora: Low-rank adaptation of large language models. ICLR 1, 2 (2022), 3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 247, + 539, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 247, + 539, + 266 + ], + "spans": [ + { + "bbox": [ + 111, + 247, + 539, + 266 + ], + "type": "text", + "content": "[43] Qiushi Huang, Shuai Fu, Xubo Liu, Wenwu Wang, Tom Ko, Yu Zhang, and Lilian Tang. 2024. Learning retrieval augmentation for personalized dialogue generation. arXiv preprint arXiv:2406.18847 (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 267, + 539, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 267, + 539, + 286 + ], + "spans": [ + { + "bbox": [ + 111, + 267, + 539, + 286 + ], + "type": "text", + "content": "[44] Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. 2022. 
Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International conference on machine learning. PMLR, 9118-9147." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 287, + 539, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 287, + 539, + 306 + ], + "spans": [ + { + "bbox": [ + 111, + 287, + 539, + 306 + ], + "type": "text", + "content": "[45] Xu Huang, Weiwen Liu, Xiaolong Chen, Xingmei Wang, Hao Wang, Defu Lian, Yasheng Wang, Ruiming Tang, and Enhong Chen. 2024. Understanding the planning of LLM agents: A survey. arXiv preprint arXiv:2402.02716 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 307, + 539, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 307, + 539, + 326 + ], + "spans": [ + { + "bbox": [ + 111, + 307, + 539, + 326 + ], + "type": "text", + "content": "[46] Rolf Jagerman, Honglei Zhuang, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2023. Query expansion by prompting large language models. arXiv preprint arXiv:2305.03653 (2023)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 327, + 539, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 327, + 539, + 355 + ], + "spans": [ + { + "bbox": [ + 111, + 327, + 539, + 355 + ], + "type": "text", + "content": "[47] Joel Jang, Seungone Kim, Bill Yuchen Lin, Yizhong Wang, Jack Hessel, Luke Zettlemoyer, Hannaneh Hajishirzi, Yejin Choi, and Prithviraj Ammanabrolu. 2023. Personalized soups: Personalized large language model alignment via post-hoc parameter merging. arXiv preprint arXiv:2310.11564 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 356, + 539, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 539, + 386 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 539, + 386 + ], + "type": "text", + "content": "[48] Pengyue Jia, Yiding Liu, Xiangyu Zhao, Xiaopeng Li, Changying Hao, Shuaiqiang Wang, and Dawei Yin. 2024. MILL: Mutual Verification with Large Language Models for Zero-Shot Query Expansion. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 2498-2518." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 387, + 539, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 387, + 539, + 406 + ], + "spans": [ + { + "bbox": [ + 111, + 387, + 539, + 406 + ], + "type": "text", + "content": "[49] Guangyuan Jiang, Manjie Xu, Song-Chun Zhu, Wenjuan Han, Chi Zhang, and Yixin Zhu. 2023. Evaluating and inducing personality in pre-trained language models. Advances in Neural Information Processing Systems 36 (2023), 10622-10643." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 407, + 539, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 407, + 539, + 435 + ], + "spans": [ + { + "bbox": [ + 111, + 407, + 539, + 435 + ], + "type": "text", + "content": "[50] Hideaki Joko, Shubham Chatterjee, Andrew Ramsay, Arjen P De Vries, Jeff Dalton, and Faegheh Hasibi. 2024. Doing personal laps: Llm-augmented dialogue construction for personalized multi-session conversational search. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 796-806." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 436, + 539, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 436, + 539, + 456 + ], + "spans": [ + { + "bbox": [ + 111, + 436, + 539, + 456 + ], + "type": "text", + "content": "[51] Wang-Cheng Kang, Jianmo Ni, Nikhil Mehta, Maheswaran Sathiamoorthy, Lichan Hong, Ed Chi, and Derek Zhiyuan Cheng. 2023. Do llms understand user preferences? evaluating llms on user rating prediction. arXiv preprint arXiv:2305.06474 (2023)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 456, + 539, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 456, + 539, + 475 + ], + "spans": [ + { + "bbox": [ + 111, + 456, + 539, + 475 + ], + "type": "text", + "content": "[52] Manojkumar Rangasamy Kannadasan and Grigor Aslanyan. 2019. Personalized query auto-completion through a lightweight representation of the user context. arXiv preprint arXiv:1905.01386 (2019)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 476, + 539, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 476, + 539, + 515 + ], + "spans": [ + { + "bbox": [ + 111, + 476, + 539, + 515 + ], + "type": "text", + "content": "[53] Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Greg Corrado, Laszlo Lukacs, Marina Ganea, Peter Young, and Vivek Ramavajjala. 2016. Smart Reply: Automated Response Suggestion for Email. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (San Francisco, California, USA) (KDD '16). Association for Computing Machinery, New York, NY, USA, 955-964. https://doi.org/10.1145/2939672.2939801" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 516, + 539, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 516, + 539, + 535 + ], + "spans": [ + { + "bbox": [ + 111, + 516, + 539, + 535 + ], + "type": "text", + "content": "[54] Mandar Kulkarni, Praveen Tangarajan, Kyung Kim, and Anusua Trivedi. 2024. Reinforcement learning for optimizing rag for domain chatbots. arXiv preprint arXiv:2401.06800 (2024)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 111, + 536, + 539, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 536, + 539, + 555 + ], + "spans": [ + { + "bbox": [ + 111, + 536, + 539, + 555 + ], + "type": "text", + "content": "[55] Ishita Kumar, Snigdha Viswanathan, Sushrita Yerra, Alireza Salemi, Ryan A Rossi, Franck Dernoncourt, Hanieh Deilamsalehy, Xiang Chen, Ruiyi Zhang, Shubham Agarwal, et al. 2024. Longlamp: A benchmark for personalized long-form text generation. arXiv preprint arXiv:2407.11016 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 556, + 539, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 556, + 539, + 575 + ], + "spans": [ + { + "bbox": [ + 111, + 556, + 539, + 575 + ], + "type": "text", + "content": "[56] Brenden Lake and Marco Baroni. 2018. Generalization without systematicity: On the compositional skills of sequence-to-sequence recurrent networks. In International conference on machine learning. PMLR, 2873-2882." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 111, + 576, + 539, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 576, + 539, + 595 + ], + "spans": [ + { + "bbox": [ + 111, + 576, + 539, + 595 + ], + "type": "text", + "content": "[57] Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, and Michael Bendersky. 2024. Learning to rewrite prompts for personalized text generation. In Proceedings of the ACM Web Conference 2024. 3367-3378." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 111, + 596, + 539, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 596, + 539, + 615 + ], + "spans": [ + { + "bbox": [ + 111, + 596, + 539, + 615 + ], + "type": "text", + "content": "[58] Changhao Li, Yuchen Zhuang, Rushi Qiang, Haotian Sun, Hanjun Dai, Chao Zhang, and Bo Dai. 2024. Matryoshka: Learning to Drive Black-Box LLMs with LLMs. arXiv preprint arXiv:2410.20749 (2024)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 111, + 616, + 539, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 616, + 539, + 635 + ], + "spans": [ + { + "bbox": [ + 111, + 616, + 539, + 635 + ], + "type": "text", + "content": "[59] Lei Li, Yongfeng Zhang, and Li Chen. 2023. Personalized prompt learning for explainable recommendation. ACM Transactions on Information Systems 41, 4 (2023), 1-26." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 635, + 539, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 635, + 539, + 655 + ], + "spans": [ + { + "bbox": [ + 111, + 635, + 539, + 655 + ], + "type": "text", + "content": "[60] Sen Li, Fuyu Lv, Taiwei Jin, Guiyang Li, Yukun Zheng, Tao Zhuang, Qingwen Liu, Xiaoyi Zeng, James Kwok, and Qianli Ma. 2022. Query rewriting in taobao search. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 3262-3271." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 327, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 327, + 666 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 327, + 666 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 98, + 503, + 655 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 75, + 98, + 503, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 98, + 503, + 117 + ], + "spans": [ + { + "bbox": [ + 75, + 98, + 503, + 117 + ], + "type": "text", + "content": "[61] Xiaopeng Li, Lixin Su, Pengyue Jia, Xiangyu Zhao, Suqi Cheng, Junfeng Wang, and Dawei Yin. 2023. Agent4ranking: Semantic robust ranking via personalized query rewriting using multi-agent llm. 
arXiv preprint arXiv:2312.15450 (2023)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 118, + 503, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 118, + 503, + 137 + ], + "spans": [ + { + "bbox": [ + 75, + 118, + 503, + 137 + ], + "type": "text", + "content": "[62] Xinyu Li, Ruiyang Zhou, Zachary C Lipton, and Liu Leqi. 2024. Personalized language modeling from personalized human feedback. arXiv preprint arXiv:2402.05133 (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 138, + 502, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 138, + 502, + 157 + ], + "spans": [ + { + "bbox": [ + 75, + 138, + 502, + 157 + ], + "type": "text", + "content": "[63] Yuanchun Li, Hao Wen, Weijun Wang, Xiangyu Li, Yizhen Yuan, Guohong Liu, Jiacheng Liu, Wenxing Xu, Xiang Wang, Yi Sun, et al. 2024. Personal llm agents: Insights and survey about the capability, efficiency and security. arXiv preprint arXiv:2401.05459 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 158, + 502, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 158, + 502, + 177 + ], + "spans": [ + { + "bbox": [ + 75, + 158, + 502, + 177 + ], + "type": "text", + "content": "[64] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. 2023. Towards general text embeddings with multi-stage contrastive learning. arXiv preprint arXiv:2308.03281 (2023)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 178, + 503, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 178, + 503, + 205 + ], + "spans": [ + { + "bbox": [ + 75, + 178, + 503, + 205 + ], + "type": "text", + "content": "[65] Ruixue Lian, Sixing Lu, Clint Solomon, Gustavo Aguilar, Pragaash Ponnusamy, Jialong Han, Chengyuan Ma, and Chenlei Guo. 2023. PersonalTM: Transformer memory for personalized retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2256-2260." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 207, + 503, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 207, + 503, + 226 + ], + "spans": [ + { + "bbox": [ + 75, + 207, + 503, + 226 + ], + "type": "text", + "content": "[66] Shan-Mu Lin and Chuen-Min Huang. 2006. Personalized optimal search in local query expansion. In Proceedings of the 18th Conference on Computational Linguistics and Speech Processing. 221-236." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 228, + 503, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 228, + 503, + 246 + ], + "spans": [ + { + "bbox": [ + 75, + 228, + 503, + 246 + ], + "type": "text", + "content": "[67] Junling Liu, Chao Liu, Peilin Zhou, Renjie Lv, Kang Zhou, and Yan Zhang. 2023. Is chatgpt a good recommender? a preliminary study. arXiv preprint arXiv:2304.10149 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 247, + 503, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 247, + 503, + 266 + ], + "spans": [ + { + "bbox": [ + 75, + 247, + 503, + 266 + ], + "type": "text", + "content": "[68] Jiahong Liu, Zexuan Qiu, Zhongyang Li, Quanyu Dai, Jieming Zhu, Minda Hu, Menglin Yang, and Irwin King. 2025. A Survey of Personalized Large Language Models: Progress and Future Directions. arXiv preprint arXiv:2502.11528 (2025)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 267, + 503, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 267, + 503, + 286 + ], + "spans": [ + { + "bbox": [ + 75, + 267, + 503, + 286 + ], + "type": "text", + "content": "[69] Nelson F Liu, Kevin Lin, John Hewitt, Ashwin Paranjape, Michele Bevilacqua, Fabio Petroni, and Percy Liang. 2024. Lost in the middle: How language models use long contexts. Transactions of the Association for Computational Linguistics 12 (2024), 157-173." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 75, + 287, + 503, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 287, + 503, + 306 + ], + "spans": [ + { + "bbox": [ + 75, + 287, + 503, + 306 + ], + "type": "text", + "content": "[70] Qijiong Liu, Nuo Chen, Tetsuya Sakai, and Xiao-Ming Wu. 2024. Once: Boosting content-based recommendation with both open-and closed-source large language models. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining. 452-461." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 75, + 307, + 503, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 307, + 503, + 326 + ], + "spans": [ + { + "bbox": [ + 75, + 307, + 503, + 326 + ], + "type": "text", + "content": "[71] Shuai Liu, Hyundong J Cho, Marjorie Freedman, Xuezhe Ma, and Jonathan May. 2023. RECAP: retrieval-enhanced context-aware prefix encoder for personalized dialogue response generation. arXiv preprint arXiv:2306.07206 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 75, + 327, + 479, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 327, + 479, + 336 + ], + "spans": [ + { + "bbox": [ + 75, + 327, + 479, + 336 + ], + "type": "text", + "content": "[72] Tyler Lu and Craig Boutilier. 2011. Budgeted social choice: From consensus to personalized decision making. In *IJCAI*, Vol. 11, 280-286." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 75, + 338, + 503, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 338, + 503, + 356 + ], + "spans": [ + { + "bbox": [ + 75, + 338, + 503, + 356 + ], + "type": "text", + "content": "[73] Zhengyi Ma, Zhicheng Dou, Yutao Zhu, Hanxun Zhong, and Ji-Rong Wen. 2021. One chatbot per person: Creating personalized chatbots based on implicit user profiles. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 555-564." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 75, + 357, + 503, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 357, + 503, + 376 + ], + "spans": [ + { + "bbox": [ + 75, + 357, + 503, + 376 + ], + "type": "text", + "content": "[74] Aman Madaan, Niket Tandon, Peter Clark, and Yiming Yang. 2022. Memory-assisted prompt editing to improve GPT-3 after deployment. arXiv preprint arXiv:2201.06009 (2022)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 75, + 376, + 503, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 376, + 503, + 396 + ], + "spans": [ + { + "bbox": [ + 75, + 376, + 503, + 396 + ], + "type": "text", + "content": "[75] Shengyu Mao, Xiaohan Wang, Mengru Wang, Yong Jiang, Pengjun Xie, Fei Huang, and Ningyu Zhang. 2024. Editing Personality for Large Language Models. In CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 241-254." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 75, + 397, + 503, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 397, + 503, + 415 + ], + "spans": [ + { + "bbox": [ + 75, + 397, + 503, + 415 + ], + "type": "text", + "content": "[76] Yuren Mao, Xuemei Dong, Wenyi Xu, Yunjun Gao, Bin Wei, and Ying Zhang. 2024. Fit-rag: black-box rag with factual information and token reduction. arXiv preprint arXiv:2403.14374 (2024)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 75, + 417, + 503, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 417, + 503, + 445 + ], + "spans": [ + { + "bbox": [ + 75, + 417, + 503, + 445 + ], + "type": "text", + "content": "[77] Puneet Mathur, Zhe Liu, Ke Li, Yingyi Ma, Gil Keren, Zeeshan Ahmed, Dinesh Manocha, and Xuedong Zhang. 2023. Personal: Language model personalization via domain-distributed span aggregated k-nearest n-gram retrieval augmentation. In Findings of the Association for Computational Linguistics: EMNLP 2023. 11314-11328." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 75, + 447, + 503, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 447, + 503, + 465 + ], + "spans": [ + { + "bbox": [ + 75, + 447, + 503, + 465 + ], + "type": "text", + "content": "[78] Fatemehsadat Mireshghallah, Vaishnavi Shrivastava, Milad Shokouhi, Taylor Berg-Kirkpatrick, Robert Sim, and Dimitrios Dimitriadis. 2021. Identifier: Implicit user representations for simple and effective personalized sentiment analysis. arXiv preprint arXiv:2110.00135 (2021)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 75, + 467, + 503, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 467, + 503, + 485 + ], + "spans": [ + { + "bbox": [ + 75, + 467, + 503, + 485 + ], + "type": "text", + "content": "[79] Philippe Mulhem, Nawal Ould Amer, and Mathias Gery. 2016. Axiomatic term-based personalized query expansion using bookmarking system. In International Conference on Database and Expert Systems Applications. Springer, 235-243." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 75, + 487, + 503, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 487, + 503, + 514 + ], + "spans": [ + { + "bbox": [ + 75, + 487, + 503, + 514 + ], + "type": "text", + "content": "[80] Sheshera Mysore, Zhuoran Lu, Mengting Wan, Longqi Yang, Steve Menezes, Tina Baghaee, Emmanuel Barajas Gonzalez, Jennifer Neville, and Tara Safavi. 2023. Pearl: Personalizing large language model writing assistants with generation-calibrated retrievers. arXiv preprint arXiv:2311.09180 (2023)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 75, + 517, + 503, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 517, + 503, + 535 + ], + "spans": [ + { + "bbox": [ + 75, + 517, + 503, + 535 + ], + "type": "text", + "content": "[81] Dang Nguyen, Jian Chen, Yu Wang, Gang Wu, Namyong Park, Zhengmian Hu, Hanjia Lyu, Junda Wu, Ryan Aponte, Yu Xia, et al. 2024. Gui agents: A survey. arXiv preprint arXiv:2412.13501 (2024)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 75, + 536, + 503, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 536, + 503, + 555 + ], + "spans": [ + { + "bbox": [ + 75, + 536, + 503, + 555 + ], + "type": "text", + "content": "[82] Duy A Nguyen, Rishi Kesav Mohan, Van Yang, Pritom Saha Akash, and Kevin Chen-Chuan Chang. 2025. 
RL-based Query Rewriting with Distilled LLM for online E-Commerce Systems. arXiv preprint arXiv:2501.18056 (2025)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 75, + 556, + 503, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 556, + 503, + 585 + ], + "spans": [ + { + "bbox": [ + 75, + 556, + 503, + 585 + ], + "type": "text", + "content": "[83] Jianmo Ni, Jiacheng Li, and Julian McAuley. 2019. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP). 188-197." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 75, + 586, + 503, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 586, + 503, + 605 + ], + "spans": [ + { + "bbox": [ + 75, + 586, + 503, + 605 + ], + "type": "text", + "content": "[84] Lin Ning, Luyang Liu, Jiaxing Wu, Neo Wu, Devora Berlowitz, Sushant Prakash, Bradley Green, Shawn O'Banion, and Jun Xie. 2024. User-llm: Efficient llm contextualization with user embeddings. arXiv preprint arXiv:2402.13598 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 75, + 606, + 503, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 606, + 503, + 624 + ], + "spans": [ + { + "bbox": [ + 75, + 606, + 503, + 624 + ], + "type": "text", + "content": "[85] Douglas Oard, William Webber, David Kirsch, and Sergey Golitsynski. 2015. Avocado research email collection. Philadelphia: Linguistic Data Consortium (2015)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 75, + 625, + 452, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 625, + 452, + 635 + ], + "spans": [ + { + "bbox": [ + 75, + 625, + 452, + 635 + ], + "type": "text", + "content": "[86] U.S. National Library of Medicine. [n.d.]. PubMed: A Free Resource for Biomedical Literature. https://pubmed.ncbi.nlm.nih.gov/" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 75, + 636, + 503, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 636, + 503, + 655 + ], + "spans": [ + { + "bbox": [ + 75, + 636, + 503, + 655 + ], + "type": "text", + "content": "[87] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 110, + 98, + 539, + 655 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 112, + 98, + 539, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 98, + 539, + 117 + ], + "spans": [ + { + "bbox": [ + 112, + 98, + 539, + 117 + ], + "type": "text", + "content": "[88] Greg Pass, Abdur Chowdhury, and Cayley Torgeson. 2006. A picture of search. In Proceedings of the 1st International Conference on Scalable Information Systems (Hong Kong) (InfoScale '06). Association for Computing Machinery, New York, NY, USA, 1-es. https://doi.org/10.1145/1146847.1146848" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 118, + 539, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 118, + 539, + 137 + ], + "spans": [ + { + "bbox": [ + 112, + 118, + 539, + 137 + ], + "type": "text", + "content": "[89] Vadim Igorevich Pavliukevich, Alina Khasanovna Zherdeva, Olesya Vladimirovna Makhnytkina, and Dmitriy Viktorovich Dyrmovskiy. [n. d.]. Improving RAG with LoRA finetuning for persona text generation. ([n. d.])." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 137, + 537, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 137, + 537, + 147 + ], + "spans": [ + { + "bbox": [ + 112, + 137, + 537, + 147 + ], + "type": "text", + "content": "[90] Dan Peng, Zhihui Fu, and Jun Wang. 2024. Pocketllm: Enabling on-device fine-tuning for personalized llms. arXiv preprint arXiv:2407.01031 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 148, + 539, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 539, + 167 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 539, + 167 + ], + "type": "text", + "content": "[91] Qiyao Peng, Hongtao Liu, Hongyan Xu, Qing Yang, Minglai Shao, and Wenjun Wang. 2024. Review-LLM: Harnessing Large Language Models for Personalized Review Generation. arXiv:2407.07487 [cs.CL] https://arxiv.org/abs/2407.07487" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 167, + 539, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 167, + 539, + 187 + ], + "spans": [ + { + "bbox": [ + 111, + 167, + 539, + 187 + ], + "type": "text", + "content": "[92] Hongjin Qian, Zhicheng Dou, Yutao Zhu, Yueyuan Ma, and Ji-Rong Wen. 2021. 
Learning implicit user profile for personalized retrieval-based chatbot. In proceedings of the 30th ACM international conference on Information & Knowledge Management. 1467-1477." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 188, + 539, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 188, + 539, + 216 + ], + "spans": [ + { + "bbox": [ + 111, + 188, + 539, + 216 + ], + "type": "text", + "content": "[93] Hongjin Qian, Xiahe Li, Hanxun Zhong, Yu Guo, Yueyuan Ma, Yutao Zhu, Zhanliang Liu, Zhicheng Dou, and Ji-Rong Wen. 2021. Pchatbot: a large-scale dataset for personalized chatbot. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 2470-2477." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 217, + 539, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 217, + 539, + 236 + ], + "spans": [ + { + "bbox": [ + 111, + 217, + 539, + 236 + ], + "type": "text", + "content": "[94] Xiaoru Qu, Yifan Wang, Zhao Li, and Jun Gao. 2024. Graph-enhanced prompt learning for personalized review generation. Data Science and Engineering 9, 3 (2024), 309-324." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 237, + 539, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 237, + 539, + 247 + ], + "spans": [ + { + "bbox": [ + 111, + 237, + 539, + 247 + ], + "type": "text", + "content": "[95] A. Rajaraman and J.D. Ullman. 2011. Mining of Massive Datasets. Cambridge University Press. https://books.google.co.uk/books?id=OefRhZyYOb0C" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 247, + 539, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 247, + 539, + 266 + ], + "spans": [ + { + "bbox": [ + 111, + 247, + 539, + 266 + ], + "type": "text", + "content": "[96] Yiting Ran, Xintao Wang, Rui Xu, Xinfeng Yuan, Jiaqing Liang, Deqing Yang, and Yanghua Xiao. 2024. Capturing minds, not just words: Enhancing role-playing language models with personality-indicative data. arXiv preprint arXiv:2406.18921 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 267, + 539, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 267, + 539, + 286 + ], + "spans": [ + { + "bbox": [ + 111, + 267, + 539, + 286 + ], + "type": "text", + "content": "[97] Chandan K. Reddy, Lluis Marquez, Fran Valero, Nikhil Rao, Hugo Zaragoza, Sambaran Bandyopadhyay, Arnab Biswas, Anlu Xing, and Karthik Subbian. 2022. Shopping Queries Dataset: A Large-Scale ESCI Benchmark for Improving Product Search. (2022). arXiv:2206.06588" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 287, + 537, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 287, + 537, + 297 + ], + "spans": [ + { + "bbox": [ + 111, + 287, + 537, + 297 + ], + "type": "text", + "content": "[98] Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 297, + 539, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 297, + 539, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 297, + 539, + 316 + ], + "type": "text", + "content": "[99] Ruiyang Ren, Peng Qiu, Yingqi Qu, Jing Liu, Wayne Xin Zhao, Hua Wu, Ji-Rong Wen, and Haifeng Wang. 2024. 
Bases: Large-scale web search user simulation with large language model based agents. arXiv preprint arXiv:2402.17505 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 110, + 317, + 539, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 317, + 539, + 335 + ], + "spans": [ + { + "bbox": [ + 110, + 317, + 539, + 335 + ], + "type": "text", + "content": "[100] Matthew Renze and Erhan Guven. 2024. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 337, + 539, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 337, + 539, + 356 + ], + "spans": [ + { + "bbox": [ + 110, + 337, + 539, + 356 + ], + "type": "text", + "content": "[101] Chris Richardson, Yao Zhang, Kellen Gillespie, Sudipta Kar, Arshdeep Singh, Zeynab Raeesy, Omar Zia Khan, and Abhinav Sethy. 2023. Integrating summarization and retrieval for enhanced personalization via large language models. arXiv preprint arXiv:2310.20081 (2023)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 357, + 539, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 357, + 539, + 376 + ], + "spans": [ + { + "bbox": [ + 110, + 357, + 539, + 376 + ], + "type": "text", + "content": "[102] Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends in Information Retrieval 3, 4 (2009), 333-389." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 110, + 376, + 539, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 376, + 539, + 396 + ], + "spans": [ + { + "bbox": [ + 110, + 376, + 539, + 396 + ], + "type": "text", + "content": "[103] Alireza Salemi, Surya Kallumadi, and Hamed Zamani. 2024. Optimization methods for personalizing large language models through retrieval augmentation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 752-762." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 110, + 396, + 539, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 396, + 539, + 416 + ], + "spans": [ + { + "bbox": [ + 110, + 396, + 539, + 416 + ], + "type": "text", + "content": "[104] Alireza Salemi, Cheng Li, Mingyang Zhang, Qiao zhu Mei, Weize Kong, Tao Chen, Zhuowan Li, Michael Bendersky, and Hamed Zamani. 2025. Reasoning-Enhanced Self-Training for Long-Form Personalized Text Generation. arXiv preprint arXiv:2501.04167 (2025)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 110, + 417, + 539, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 417, + 539, + 436 + ], + "spans": [ + { + "bbox": [ + 110, + 417, + 539, + 436 + ], + "type": "text", + "content": "[105] Alireza Salemi, Sheshera Mysore, Michael Bendersky, and Hamed Zamani. 2024. LaMP: When Large Language Models Meet Personalization. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 7370-7392." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 110, + 437, + 539, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 437, + 539, + 455 + ], + "spans": [ + { + "bbox": [ + 110, + 437, + 539, + 455 + ], + "type": "text", + "content": "[106] Alireza Salemi and Hamed Zamani. 2024. 
Learning to Rank for Multiple Retrieval-Augmented Models through Iterative Utility Maximization. arXiv preprint arXiv:2410.09942 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 110, + 456, + 539, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 456, + 539, + 475 + ], + "spans": [ + { + "bbox": [ + 110, + 456, + 539, + 475 + ], + "type": "text", + "content": "[107] Shibani Santurkar, Esin Durmus, Faisal Ladhak, Cinoo Lee, Percy Liang, and Tatsunori Hashimoto. 2023. Whose opinions do language models reflect?. In International Conference on Machine Learning. PMLR, 29971-30004." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 110, + 476, + 539, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 476, + 539, + 495 + ], + "spans": [ + { + "bbox": [ + 110, + 476, + 539, + 495 + ], + "type": "text", + "content": "[108] Rossano Schifanella, Alain Barrat, Ciro Cattuto, Benjamin Markines, and Filippo Menczer. 2010. Folks in folksonomies: social link prediction from shared metadata. In Proceedings of the third ACM international conference on Web search and data mining. 271-280." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 110, + 496, + 539, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 496, + 539, + 515 + ], + "spans": [ + { + "bbox": [ + 110, + 496, + 539, + 515 + ], + "type": "text", + "content": "[109] Noor Shaker, Georgios Yannakakis, and Julian Togelius. 2010. Towards automatic personalized content generation for platform games. In Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment, Vol. 6. 63-68." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 110, + 516, + 537, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 516, + 537, + 525 + ], + "spans": [ + { + "bbox": [ + 110, + 516, + 537, + 525 + ], + "type": "text", + "content": "[110] Yunfan Shao, Linyang Li, Junqi Dai, and Xipeng Qiu. 2023. Character-llm: A trainable agent for role-playing. arXiv preprint arXiv:2310.10158 (2023)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 110, + 526, + 539, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 526, + 539, + 545 + ], + "spans": [ + { + "bbox": [ + 110, + 526, + 539, + 545 + ], + "type": "text", + "content": "[111] Jocelyn Shen, Joel Mire, Hae Won Park, Cynthia Breazeal, and Maarten Sap. 2024. HEART-felt Narratives: Tracing Empathy and Narrative Style in Personal Stories with LLMs. arXiv preprint arXiv:2405.17633 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 110, + 546, + 539, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 546, + 539, + 565 + ], + "spans": [ + { + "bbox": [ + 110, + 546, + 539, + 565 + ], + "type": "text", + "content": "[112] Yunxiao Shi, Xing Zi, Zijing Shi, Haimin Zhang, Qiang Wu, and Min Xu. 2024. Eragent: Enhancing retrieval-augmented language models with improved accuracy, efficiency, and personalization. arXiv preprint arXiv:2405.06683 (2024)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 110, + 566, + 539, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 566, + 539, + 585 + ], + "spans": [ + { + "bbox": [ + 110, + 566, + 539, + 585 + ], + "type": "text", + "content": "[113] Aditi Singh, Abul Ehtesham, Saket Kumar, and Tala Talaei Khoei. 2025. Agentic Retrieval-Augmented Generation: A Survey on Agentic RAG. 
arXiv preprint arXiv:2501.09136 (2025)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 110, + 586, + 539, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 586, + 539, + 614 + ], + "spans": [ + { + "bbox": [ + 110, + 586, + 539, + 614 + ], + "type": "text", + "content": "[114] Harmanpreet Singh, Nikhil Verma, Yixiao Wang, Manasa Bharadwaj, Homa Fashandi, Kevin Ferreira, and Chul Lee. 2024. Personal Large Language Model Agents: A Case Study on Tailored Travel Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 486-514." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 110, + 616, + 539, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 616, + 539, + 644 + ], + "spans": [ + { + "bbox": [ + 110, + 616, + 539, + 644 + ], + "type": "text", + "content": "[115] Shamane Siriwardhana, Rivindu Weerasekera, Elliott Wen, Tharindu Kaluarachchi, Rajib Rana, and Suranga Nanayakkara. 2023. Improving the domain adaptation of retrieval augmented generation (RAG) models for open domain question answering. Transactions of the Association for Computational Linguistics 11 (2023), 1-17." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 110, + 646, + 522, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 646, + 522, + 655 + ], + "spans": [ + { + "bbox": [ + 110, + 646, + 522, + 655 + ], + "type": "text", + "content": "[116] Mingyang Song and Mao Zheng. 2024. A Survey of Query Optimization in Large Language Models. arXiv preprint arXiv:2412.17558 (2024)." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 665 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 665 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 665 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 72, + 98, + 502, + 654 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 72, + 98, + 394, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 98, + 394, + 107 + ], + "spans": [ + { + "bbox": [ + 72, + 98, + 394, + 107 + ], + "type": "text", + "content": "[117] Spotify. 2023. Annoy: Approximate Nearest Neighbors in C++/Python. https://github.com/spotify/annoy" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 108, + 502, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 108, + 502, + 126 + ], + "spans": [ + { + "bbox": [ + 72, + 108, + 502, + 126 + ], + "type": "text", + "content": "[118] Stuck_In_the Matrix. 2015. Reddit Public Comments (2007-10 through 2015-05). (2015). 
https://www.reddit.com/r/datasets/comments/3bxlg7/i_have EVERY_publicly-available Reddit_COMMENT/" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 128, + 502, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 128, + 502, + 146 + ], + "spans": [ + { + "bbox": [ + 72, + 128, + 502, + 146 + ], + "type": "text", + "content": "[119] Lei Sun, Jinming Zhao, and Qin Jin. 2024. Revealing Personality Traits: A New Benchmark Dataset for Explanable Personality Recognition on Dialogues. arXiv preprint arXiv:2409.19723 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 148, + 502, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 148, + 502, + 167 + ], + "spans": [ + { + "bbox": [ + 72, + 148, + 502, + 167 + ], + "type": "text", + "content": "[120] Zhaoxuan Tan, Zheyuan Liu, and Meng Jiang. 2024. Personalized pieces: Efficient personalized large language models through collaborative efforts. arXiv preprint arXiv:2406.10471 (2024)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 168, + 502, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 168, + 502, + 186 + ], + "spans": [ + { + "bbox": [ + 72, + 168, + 502, + 186 + ], + "type": "text", + "content": "[121] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2024. Democratizing large language models via personalized parameter-efficient fine-tuning. arXiv preprint arXiv:2402.04401 (2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 72, + 187, + 502, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 187, + 502, + 206 + ], + "spans": [ + { + "bbox": [ + 72, + 187, + 502, + 206 + ], + "type": "text", + "content": "[122] Zhaoxuan Tan, Qingkai Zeng, Yijun Tian, Zheyuan Liu, Bing Yin, and Meng Jiang. 2025. Democratizing Large Language Models via Personalized Parameter-Efficient Fine-tuning. arXiv:2402.04401 [cs.CL] https://arxiv.org/abs/2402.04401" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 72, + 207, + 502, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 207, + 502, + 226 + ], + "spans": [ + { + "bbox": [ + 72, + 207, + 502, + 226 + ], + "type": "text", + "content": "[123] Quan Tu, Shilong Fan, Zihang Tian, and Rui Yan. 2024. Charactereval: A Chinese benchmark for role-playing conversational agent evaluation. arXiv preprint arXiv:2401.01275 (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 72, + 227, + 351, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 227, + 351, + 236 + ], + "spans": [ + { + "bbox": [ + 72, + 227, + 351, + 236 + ], + "type": "text", + "content": "[124] Cornell University. [n.d.]. arXiv: An Open Access Repository for Research. https://arxiv.org/" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 72, + 237, + 502, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 237, + 502, + 266 + ], + "spans": [ + { + "bbox": [ + 72, + 237, + 502, + 266 + ], + "type": "text", + "content": "[125] Hemanth Vemuri, Sheshansh Agrawal, Shivam Mittal, Deepak Saini, Akshay Soni, Abhinav V Sambasivan, Wenhao Lu, Yajun Wang, Mehul Parsana, Purushottam Kar, et al. 2023. Personalized retrieval over millions of items. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1014-1022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 72, + 266, + 502, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 266, + 502, + 286 + ], + "spans": [ + { + "bbox": [ + 72, + 266, + 502, + 286 + ], + "type": "text", + "content": "[126] Bryan Wang, Gang Li, and Yang Li. 2023. Enabling conversational interaction with mobile ui using large language models. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-17." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 72, + 287, + 502, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 287, + 502, + 306 + ], + "spans": [ + { + "bbox": [ + 72, + 287, + 502, + 306 + ], + "type": "text", + "content": "[127] Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291 (2023)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 72, + 307, + 502, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 307, + 502, + 326 + ], + "spans": [ + { + "bbox": [ + 72, + 307, + 502, + 326 + ], + "type": "text", + "content": "[128] Hongru Wang, Wenyu Huang, Yang Deng, Rui Wang, Zezhong Wang, Yufei Wang, Fei Mi, Jeff Z Pan, and Kam-Fai Wong. 2024. Unims-rag: A unified multi-source retrieval-augmented generation for personalized dialogue systems. arXiv preprint arXiv:2401.13256 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 72, + 327, + 502, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 327, + 502, + 346 + ], + "spans": [ + { + "bbox": [ + 72, + 327, + 502, + 346 + ], + "type": "text", + "content": "[129] Hongru Wang, Rui Wang, Fei Mi, Yang Deng, Zezhong Wang, Bin Liang, Ruifeng Xu, and Kam-Fai Wong. 2023. Cue-CoT: Chain-of-thought prompting for responding to in-depth dialogue questions with LLMs. arXiv preprint arXiv:2305.11792 (2023)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 72, + 346, + 502, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 346, + 502, + 365 + ], + "spans": [ + { + "bbox": [ + 72, + 346, + 502, + 365 + ], + "type": "text", + "content": "[130] Jian Wang, Yi Cheng, Dongding Lin, Chak Tou Leong, and Wenjie Li. 2023. Target-oriented proactive dialogue systems with personalization: Problem formulation and dataset curation. arXiv preprint arXiv:2310.07397 (2023)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 72, + 366, + 502, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 366, + 502, + 385 + ], + "spans": [ + { + "bbox": [ + 72, + 366, + 502, + 385 + ], + "type": "text", + "content": "[131] Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. 2024. A survey on large language model based autonomous agents. Frontiers of Computer Science 18, 6 (2024), 186345." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 72, + 386, + 496, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 386, + 496, + 396 + ], + "spans": [ + { + "bbox": [ + 72, + 386, + 496, + 396 + ], + "type": "text", + "content": "[132] Liang Wang, Nan Yang, and Furu Wei. 2023. Query2doc: Query expansion with large language models. arXiv preprint arXiv:2303.07678 (2023)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 72, + 396, + 502, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 396, + 502, + 415 + ], + "spans": [ + { + "bbox": [ + 72, + 396, + 502, + 415 + ], + "type": "text", + "content": "[133] Lei Wang, Jingsen Zhang, Hao Yang, Zhiyuan Chen, Jiakai Tang, Zeyu Zhang, Xu Chen, Yankai Lin, Ruihua Song, Wayne Xin Zhao, et al. 2023. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552 (2023)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 72, + 417, + 502, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 417, + 502, + 436 + ], + "spans": [ + { + "bbox": [ + 72, + 417, + 502, + 436 + ], + "type": "text", + "content": "[134] Xintao Wang, Yunze Xiao, Jen-tse Huang, Siyu Yuan, Rui Xu, Haoran Guo, Quan Tu, Yaying Fei, Ziang Leng, Wei Wang, et al. 2023. Incharacter: Evaluating personality fidelity in role-playing agents through psychological interviews. arXiv preprint arXiv:2310.17976 (2023)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 72, + 437, + 502, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 437, + 502, + 456 + ], + "spans": [ + { + "bbox": [ + 72, + 437, + 502, + 456 + ], + "type": "text", + "content": "[135] Yixiao Wang, Homa Fashandi, and Kevin Ferreira. 2024. Investigating the Personality Consistency in Quantized Role-Playing Dialogue Agents. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 239–255." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 72, + 456, + 502, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 456, + 502, + 475 + ], + "spans": [ + { + "bbox": [ + 72, + 456, + 502, + 475 + ], + "type": "text", + "content": "[136] Yu Wang, Yifan Gao, Xiusi Chen, Haoming Jiang, Shiyang Li, Jingfeng Yang, Qingyu Yin, Zheng Li, Xian Li, Bing Yin, et al. [n.d.]. MEMORYLLM: Towards Self-Updatable Large Language Models. In Forty-first International Conference on Machine Learning." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 72, + 476, + 502, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 476, + 502, + 495 + ], + "spans": [ + { + "bbox": [ + 72, + 476, + 502, + 495 + ], + "type": "text", + "content": "[137] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 4891-4906." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 72, + 496, + 502, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 496, + 502, + 515 + ], + "spans": [ + { + "bbox": [ + 72, + 496, + 502, + 515 + ], + "type": "text", + "content": "[138] Zijie J Wang and Duen Horng Chau. 2024. MeMemo: On-device Retrieval Augmentation for Private and Personalized Text Generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2765-2770." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 72, + 516, + 502, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 516, + 502, + 544 + ], + "spans": [ + { + "bbox": [ + 72, + 516, + 502, + 544 + ], + "type": "text", + "content": "[139] Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. 2023. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746 (2023)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 72, + 546, + 502, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 546, + 502, + 574 + ], + "spans": [ + { + "bbox": [ + 72, + 546, + 502, + 574 + ], + "type": "text", + "content": "[140] Tianxin Wei, Bowen Jin, Ruirui Li, Hansi Zeng, Zhengyang Wang, Jianhui Sun, Qingyu Yin, Hanqing Lu, Suhang Wang, Jingrui He, et al. 2024. Towards unified multi-modal personalization: Large vision-language models for generative recommendation and beyond. arXiv preprint arXiv:2403.10667 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 72, + 576, + 502, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 576, + 502, + 595 + ], + "spans": [ + { + "bbox": [ + 72, + 576, + 502, + 595 + ], + "type": "text", + "content": "[141] Robert Wetzker, Carsten Zimmermann, and Christian Bauchage. 2008. Analyzing social bookmarking systems: A del. icio. us cookbook. In Proceedings of the ECAI 2008 Mining Social Data Workshop. 26-30." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 72, + 596, + 502, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 596, + 502, + 615 + ], + "spans": [ + { + "bbox": [ + 72, + 596, + 502, + 615 + ], + "type": "text", + "content": "[142] Stanisław Wozniak, Bartlomiej Koptyra, Arkadiusz Janz, Przemysław Kazienko, and Jan Kocón. 2024. Personalized large language models. arXiv preprint arXiv:2402.09269 (2024)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 72, + 616, + 502, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 616, + 502, + 635 + ], + "spans": [ + { + "bbox": [ + 72, + 616, + 502, + 635 + ], + "type": "text", + "content": "[143] Junde Wu, Jiayuan Zhu, Yunli Qi, Jingkun Chen, Min Xu, Filippo Menolascina, and Vicente Grau. 2024. Medical graph rag: Towards safe medical large language model via graph retrieval-augmented generation. arXiv preprint arXiv:2408.04187 (2024)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 72, + 635, + 502, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 635, + 502, + 654 + ], + "spans": [ + { + "bbox": [ + 72, + 635, + 502, + 654 + ], + "type": "text", + "content": "[144] Xuan Wu, Dong Zhou, Yu Xu, and Seamus Lawless. 2017. Personalized query expansion utilizing multi-relational social data. In 2017 12th International Workshop on Semantic and Social Media Adaptation and Personalization (SMAP). IEEE, 65-70." 
+ } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 72, + 213, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 213, + 81 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 213, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 108, + 98, + 539, + 654 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 108, + 98, + 539, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 98, + 539, + 126 + ], + "spans": [ + { + "bbox": [ + 108, + 98, + 539, + 126 + ], + "type": "text", + "content": "[145] Yunjia Xi, Weiwen Liu, Jianghao Lin, Xiaoling Cai, Hong Zhu, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, and Yong Yu. 2024. Towards open-world recommendation with knowledge augmentation from large language models. In Proceedings of the 18th ACM Conference on Recommender Systems. 12-22." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 127, + 539, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 127, + 539, + 147 + ], + "spans": [ + { + "bbox": [ + 108, + 127, + 539, + 147 + ], + "type": "text", + "content": "[146] Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, et al. 2025. The rise and potential of large language model based agents: A survey. Science China Information Sciences 68, 2 (2025), 121101." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 148, + 539, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 148, + 539, + 167 + ], + "spans": [ + { + "bbox": [ + 108, + 148, + 539, + 167 + ], + "type": "text", + "content": "[147] Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2024. C-pack: Packed resources for general chinese embeddings. In Proceedings of the 47th international ACM SIGIR conference on research and development in information retrieval. 641-649." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 167, + 539, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 167, + 539, + 187 + ], + "spans": [ + { + "bbox": [ + 108, + 167, + 539, + 187 + ], + "type": "text", + "content": "[148] Huatao Xu, Liying Han, Qirui Yang, Mo Li, and Mani Srivastava. 2024. Penetrative ai: Making llms comprehend the physical world. In Proceedings of the 25th International Workshop on Mobile Computing Systems and Applications. 1-7." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 188, + 539, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 188, + 539, + 206 + ], + "spans": [ + { + "bbox": [ + 108, + 188, + 539, + 206 + ], + "type": "text", + "content": "[149] Hongyan Xu, Hongtao Liu, Pengfei Jiao, and Wenjun Wang. 2021. Transformer reasoning network for personalized review summarization. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1452-1461." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 207, + 539, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 207, + 539, + 226 + ], + "spans": [ + { + "bbox": [ + 108, + 207, + 539, + 226 + ], + "type": "text", + "content": "[150] Xinchao Xu, Zhibin Gou, Wenquan Wu, Zheng-Yu Niu, Hua Wu, Haifeng Wang, and Shihang Wang. 2022. Long time no see! open-domain conversation with long-term persona memory. arXiv preprint arXiv:2203.05797 (2022)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 227, + 539, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 227, + 539, + 247 + ], + "spans": [ + { + "bbox": [ + 108, + 227, + 539, + 247 + ], + "type": "text", + "content": "[151] Yiyan Xu, Jinghao Zhang, Alireza Salemi, Xinting Hu, Wenjie Wang, Fuli Feng, Hamed Zamani, Xiangnan He, and Tat-Seng Chua. 2025. Personalized Generation In Large Model Era: A Survey. arXiv preprint arXiv:2503.02614 (2025)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 108, + 247, + 539, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 247, + 539, + 266 + ], + "spans": [ + { + "bbox": [ + 108, + 247, + 539, + 266 + ], + "type": "text", + "content": "[152] Hao Yu, Xin Yang, Xin Gao, Yan Kang, Hao Wang, Junbo Zhang, and Tianrui Li. 2024. Personalized federated continual learning via multi-granularity prompt. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4023-4034." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 267, + 539, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 267, + 539, + 286 + ], + "spans": [ + { + "bbox": [ + 108, + 267, + 539, + 286 + ], + "type": "text", + "content": "[153] Xiaoyan Yu, Tongxu Luo, Yifan Wei, Fangyu Lei, Yiming Huang, Hao Peng, and Liehuang Zhu. 2024. Neeko: Leveraging dynamic lora for efficient multi-character role-playing agent. arXiv preprint arXiv:2402.13717 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 287, + 539, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 287, + 539, + 306 + ], + "spans": [ + { + "bbox": [ + 108, + 287, + 539, + 306 + ], + "type": "text", + "content": "[154] Xinfeng Yuan, Siyu Yuan, Yuhan Cui, Tianhe Lin, Xintao Wang, Rui Xu, Jiangjie Chen, and Deqing Yang. 2024. Evaluating character understanding of large language models via character profiling from fictional works. arXiv preprint arXiv:2404.12726 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 108, + 307, + 539, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 307, + 539, + 326 + ], + "spans": [ + { + "bbox": [ + 108, + 307, + 539, + 326 + ], + "type": "text", + "content": "[155] Hansi Zeng, Surya Kallumadi, Zaid Alibadi, Rodrigo Nogueira, and Hamed Zamani. 2023. A personalized dense retrieval framework for unified information access. 
In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 121-130." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 327, + 539, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 327, + 539, + 346 + ], + "spans": [ + { + "bbox": [ + 108, + 327, + 539, + 346 + ], + "type": "text", + "content": "[156] Saber Zerhoudi and Michael Granitzer. 2024. PersonaRAG: Enhancing Retrieval-Augmented Generation Systems with User-Centric Agents. arXiv preprint arXiv:2407.09394 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 346, + 539, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 346, + 539, + 376 + ], + "spans": [ + { + "bbox": [ + 108, + 346, + 539, + 376 + ], + "type": "text", + "content": "[157] Han Zhang, Songlin Wang, Kang Zhang, Zhiling Tang, Yunjiang Jiang, Yun Xiao, Weipeng Yan, and Wen-Yun Yang. 2020. Towards personalized and semantic retrieval: An end-to-end solution for e-commerce search via embedding learning. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 2407-2416." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 376, + 473, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 376, + 473, + 386 + ], + "spans": [ + { + "bbox": [ + 108, + 376, + 473, + 386 + ], + "type": "text", + "content": "[158] Jiarui Zhang. 2024. Guided profile generation improves personalization with llms. arXiv preprint arXiv:2409.13093 (2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 108, + 387, + 539, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 387, + 539, + 406 + ], + "spans": [ + { + "bbox": [ + 108, + 387, + 539, + 406 + ], + "type": "text", + "content": "[159] Jesse Zhang, Jiahui Zhang, Karl Pertsch, Ziyi Liu, Xiang Ren, Minsuk Chang, Shao-Hua Sun, and Joseph J Lim. [n.d.]. Bootstrap Your Own Skills: Learning to Solve New Tasks with Large Language Model Guidance. In 7th Annual Conference on Robot Learning." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 108, + 407, + 539, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 407, + 539, + 426 + ], + "spans": [ + { + "bbox": [ + 108, + 407, + 539, + 426 + ], + "type": "text", + "content": "[160] Kai Zhang, Yangyang Kang, Fubang Zhao, and Xiaozhong Liu. 2023. LLM-based medical assistant personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 108, + 426, + 539, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 426, + 539, + 445 + ], + "spans": [ + { + "bbox": [ + 108, + 426, + 539, + 445 + ], + "type": "text", + "content": "[161] Kaiyan Zhang, Jianyu Wang, Ermo Hua, Biqing Qi, Ning Ding, and Bowen Zhou. 2024. Cogenesis: A framework collaborating large and small language models for secure context-aware instruction following. arXiv preprint arXiv:2403.03129 (2024)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 108, + 447, + 539, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 447, + 539, + 465 + ], + "spans": [ + { + "bbox": [ + 108, + 447, + 539, + 465 + ], + "type": "text", + "content": "[162] Kai Zhang, Fubang Zhao, Yangyang Kang, and Xiaozhong Liu. 2023. 
Memory-augmented llm personalization with short-and long-term memory coordination. arXiv preprint arXiv:2309.11696 (2023)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 108, + 466, + 539, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 466, + 539, + 495 + ], + "spans": [ + { + "bbox": [ + 108, + 466, + 539, + 495 + ], + "type": "text", + "content": "[163] Wenlin Zhang, Chuhan Wu, Xiangyang Li, Yuhao Wang, Kuicai Dong, Yichao Wang, Xinyi Dai, Xiangyu Zhao, Huifeng Guo, and Ruiming Tang. 2025. LLMTreeRec: Unleashing the Power of Large Language Models for Cold-Start Recommendations. In Proceedings of the 31st International Conference on Computational Linguistics. 886-896." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 108, + 496, + 539, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 496, + 539, + 515 + ], + "spans": [ + { + "bbox": [ + 108, + 496, + 539, + 515 + ], + "type": "text", + "content": "[164] Yanyue Zhang, Yulan He, and Deyu Zhou. 2025. Rehearse With User: Personalized Opinion Summarization via Role-Playing based on Large Language Models. arXiv preprint arXiv:2503.00449 (2025)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 108, + 516, + 539, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 516, + 539, + 535 + ], + "spans": [ + { + "bbox": [ + 108, + 516, + 539, + 535 + ], + "type": "text", + "content": "[165] You Zhang, Jin Wang, Liang-Chih Yu, Dan Xu, and Xuejie Zhang. 2024. Personalized LoRA for human-centered text understanding. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 1958-19596." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 108, + 536, + 539, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 536, + 539, + 555 + ], + "spans": [ + { + "bbox": [ + 108, + 536, + 539, + 555 + ], + "type": "text", + "content": "[166] Yabin Zhang, Wenhui Yu, Erhan Zhang, Xu Chen, Lantao Hu, Peng Jiang, and Kun Gai. 2024. Recgpt: Generative personalized prompts for sequential recommendation via chatgpt training paradigm. arXiv preprint arXiv:2404.08675 (2024)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 108, + 556, + 539, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 556, + 539, + 575 + ], + "spans": [ + { + "bbox": [ + 108, + 556, + 539, + 575 + ], + "type": "text", + "content": "[167] Zeyu Zhang, Xiaohe Bo, Chen Ma, Rui Li, Xu Chen, Quanyu Dai, Jieming Zhu, Zhenhua Dong, and Ji-Rong Wen. 2024. A survey on the memory mechanism of large language model based agents. arXiv preprint arXiv:2404.13501 (2024)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 108, + 576, + 539, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 576, + 539, + 595 + ], + "spans": [ + { + "bbox": [ + 108, + 576, + 539, + 595 + ], + "type": "text", + "content": "[168] Zhehao Zhang, Ryan A Rossi, Branislav Kveton, Yijia Shao, Diyi Yang, Hamed Zamani, Franck Dernoncourt, Joe Barrow, Tong Yu, Sungchul Kim, et al. 2024. Personalization of large language models: A survey. arXiv preprint arXiv:2411.00027 (2024)." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 108, + 596, + 539, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 596, + 539, + 615 + ], + "spans": [ + { + "bbox": [ + 108, + 596, + 539, + 615 + ], + "type": "text", + "content": "[169] Yi Zheng, Chongyang Ma, Kanle Shi, and Haibin Huang. 2023. Agents meet okr: An object and key results driven agent system with hierarchical self-collaboration and self-evaluation. arXiv preprint arXiv:2311.16542 (2023)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 108, + 616, + 539, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 616, + 539, + 635 + ], + "spans": [ + { + "bbox": [ + 108, + 616, + 539, + 635 + ], + "type": "text", + "content": "[170] Hanxun Zhong, Zhicheng Dou, Yutao Zhu, Hongjin Qian, and Ji-Rong Wen. 2022. Less is more: Learning to refine dialogue history for personalized dialogue generation. arXiv preprint arXiv:2204.08128 (2022)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 108, + 635, + 539, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 635, + 539, + 654 + ], + "spans": [ + { + "bbox": [ + 108, + 635, + 539, + 654 + ], + "type": "text", + "content": "[171] Wanjun Zhong, Duyu Tang, Jiahai Wang, Jian Yin, and Nan Duan. 2021. UserAdapter: Few-shot user learning in sentiment analysis. In Findings of the Association for Computational Linguistics: ACL-JJCNLP 2021. 1484-1488." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "spans": [ + { + "bbox": [ + 108, + 72, + 281, + 81 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 539, + 80 + ], + "type": "text", + "content": "X. Li and P. Jia, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "spans": [ + { + "bbox": [ + 320, + 658, + 328, + 666 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 72, + 98, + 503, + 177 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 72, + 98, + 503, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 98, + 503, + 116 + ], + "spans": [ + { + "bbox": [ + 72, + 98, + 503, + 116 + ], + "type": "text", + "content": "[172] Dong Zhou, Séamus Lawless, and Vincent Wade. 2012. Improving search via personalized query expansion using social media. Information retrieval 15 (2012), 218-242." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 118, + 503, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 118, + 503, + 137 + ], + "spans": [ + { + "bbox": [ + 72, + 118, + 503, + 137 + ], + "type": "text", + "content": "[173] Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, et al. 2022. 
Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625 (2022)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 138, + 502, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 138, + 502, + 156 + ], + "spans": [ + { + "bbox": [ + 72, + 138, + 502, + 156 + ], + "type": "text", + "content": "[174] Yujia Zhou, Qiannan Zhu, Jiajie Jin, and Zhicheng Dou. 2024. Cognitive personalized search integrating large language models with an efficient memory mechanism. In Proceedings of the ACM Web Conference 2024. 1464-1473." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 72, + 158, + 502, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 158, + 502, + 177 + ], + "spans": [ + { + "bbox": [ + 72, + 158, + 502, + 177 + ], + "type": "text", + "content": "[175] Yuchen Zhuang, Haotian Sun, Yue Yu, Rushi Qiang, Qifan Wang, Chao Zhang, and Bo Dai. [n.d.]. Hydra: Model factorization framework for black-box llm personalization, 2024. URL https://arxiv.org/abs/2406.02888 ([n.d.])." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 71, + 189, + 308, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 189, + 308, + 198 + ], + "spans": [ + { + "bbox": [ + 71, + 189, + 308, + 198 + ], + "type": "text", + "content": "Received 20 February 2007; revised 12 March 2009; accepted 5 June 2009" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "spans": [ + { + "bbox": [ + 71, + 72, + 212, + 81 + ], + "type": "text", + "content": "A Survey of Personalization: From RAG to Agent" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "spans": [ + { + "bbox": [ + 330, + 72, + 503, + 80 + ], + "type": "text", + "content": "Conference acronym 'XX, June 03-05, 2018, Woodstock, NY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 283, + 658, + 290, + 666 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_content_list.json b/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e1bd8f3b0da8f30090a2d3f893d11159c5b392c3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_content_list.json @@ -0,0 +1,2572 @@ +[ + { + "type": "text", + "text": "SocioVerse: A World Model for Social Simulation Powered by LLM Agents and A Pool of 10 Million Real-World Users", + "text_level": 1, + "bbox": [ + 196, + 122, + 802, + 195 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xinnong Zhang $^{1,2\\dagger}$ , Jiayu Lin $^{1,2\\dagger}$ , Xinyi Mou $^{2\\dagger}$ , Shiyue Yang $^{2}$ , Xiawei Liu $^{2}$ , Libo Sun $^{2}$ , Hanjia Lyu $^{3}$ , Yihang Yang $^{2}$ , Weihong Qi $^{4}$ , Yue Chen $^{2}$ , Guanying Li $^{2}$ , Ling Yan $^{5}$ 
, Yao Hu $^{5}$ , Siming Chen $^{2}$ , Yu Wang $^{2}$ , Xuanjing Huang $^{2}$ , Jiebo Luo $^{3}$ , Shiping Tang $^{2}$ , Libo Wu $^{1,2}$ , Baohua Zhou $^{2}$ , Zhongyu Wei $^{1,2}$", + "bbox": [ + 186, + 244, + 815, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Shanghai Innovation Institute, $^{2}$ Fudan University, $^{3}$ University of Rochester, $^{4}$ Indiana University, $^{5}$ Xiaohongshu Inc. zywei@fudan.edu.cn \nSocioVerse: https://github.com/FudanDISC/SocioVerse", + "bbox": [ + 282, + 305, + 712, + 362 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ee37a034cc7bfd54903875bca0d8ebdb66151851f1d8c1aae3fe11feeefe474d.jpg", + "image_caption": [ + "Figure 1: An illustration of the SocioVerse in the case of Ukraine issue. The alignment challenges are well handled regarding environment, user, scenario, and behavior." + ], + "image_footnote": [], + "bbox": [ + 183, + 378, + 802, + 627 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 674, + 537, + 690 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Social simulation is transforming traditional social science research by modeling human behavior through interactions between virtual individuals and their environments. With recent advances in large language models (LLMs), this approach has shown growing potential in capturing individual differences and predicting group behaviors. However, existing methods face alignment challenges related to the environment, target users, interaction mechanisms, and behavioral patterns. To this end, we introduce SocioVerse, an LLM-agent-driven world model for social simulation. Our framework features four powerful alignment components and a user pool of 10 million real individuals. To validate its effectiveness, we conducted large-scale simulation experiments across three distinct domains: politics, news, and economics. Results demonstrate that SocioVerse can reflect large-scale population dynamics while ensuring diversity, credibility, and representativeness through standardized procedures and minimal manual adjustments.", + "bbox": [ + 228, + 704, + 767, + 886 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10157v3 [cs.CL] 15 Jul 2025", + "bbox": [ + 22, + 282, + 57, + 715 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "These authors contribute equally to this work.", + "bbox": [ + 192, + 896, + 470, + 911 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 173, + 89, + 312, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The study of human behavior aims to understand how individuals and groups act in various social contexts and serves as a cornerstone of social science research. Traditionally, this has been accomplished using methods such as surveys, interviews, and observations [10, 18, 44]. However, these approaches often encounter challenges, including high costs, limited sample sizes, and ethical concerns. As a result, researchers have resorted to alternative methods for studying human behavior.", + "bbox": [ + 169, + 132, + 826, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Social simulation has emerged as an effective method for addressing this issue, where researchers use agents to model human behavior, observe their reactions, and translate these findings into insights about human behavior [48, 50]. 
By assigning behavioral rules to autonomous agents, researchers can explore how micro-level decisions lead to emergent macro-level patterns through the agent-based models [11, 21]. This approach enables capturing specific groups' preferences on particular topics and forecasting potential social dynamics. Furthermore, recent advancements in large language models (LLMs) have significantly enhanced agents' reasoning and decision-making capabilities, enabling them to operate and interact within increasingly realistic and complex environments [3, 35, 37].", + "bbox": [ + 169, + 207, + 823, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent studies have explored social simulation across various levels and scenarios, from mimicking well-known individuals and mirroring specific situations to modeling large-scale social dynamics [4, 29, 34, 36, 49, 60]. However, they share a common challenge: alignment between the simulated environment and the real world, which manifests across multiple dimensions and raises several key questions that remain to be addressed, as shown in Figure 1.", + "bbox": [ + 169, + 325, + 826, + 395 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q1. How to align the simulated environment with the real world?", + "text_level": 1, + "bbox": [ + 171, + 401, + 629, + 414 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the real world, new events occur every day and new content is continuously generated. The behavior of real users is rooted in these ever-evolving social contexts and policy agendas. However, the static knowledge of LLMs prevents them from aligning with the dynamic nature of the real-world social environment [2, 15]. There is a gap between the simulated context and the real world, which results in discrepancies between the simulation process and outcomes compared to those in reality. Therefore, it is necessary to establish an update mechanism to keep the simulated environment synchronized with the real world.", + "bbox": [ + 169, + 415, + 826, + 512 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q2. How to align simulated agents with target users precisely?", + "text_level": 1, + "bbox": [ + 171, + 518, + 609, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The composition of users in the real world is both complex and diverse, making it impractical to enumerate all users in every scenario. Therefore, it is essential to identify target users whose distribution aligns with that of the users in the corresponding scenario, thereby accurately reflecting the real-world composition and relationships [17, 45]. Based on this, precise target user simulation also requires providing agents with a detailed and comprehensive description of the corresponding users, often involving the integration of high-fidelity demographic, contextual, and behavioral data.", + "bbox": [ + 169, + 532, + 823, + 614 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q3. How to align the interaction mechanism with the real world among different scenarios?", + "text_level": 1, + "bbox": [ + 171, + 621, + 810, + 635 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The diversity of social interactions presents challenges in social simulation design, requiring deliberate choices regarding the number of individuals, social structures, interaction patterns, and message dissemination mechanisms, to align with the real world. 
This often results in independently constructed task-specific simulation pipelines performing repetitive work, which reduces their generalizability and scalability [26, 58]. Therefore, there is a need for unified simulation frameworks based on systematic categorization to standardize simulation components and facilitate extensibility across different social scenarios.", + "bbox": [ + 169, + 635, + 826, + 732 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q4. How to align the behavioral pattern with the real-world groups?", + "text_level": 1, + "bbox": [ + 171, + 738, + 651, + 753 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "When the environment perceived by agents, the user composition, and the interaction mechanisms are aligned with the real world, agents are expected to exhibit responses consistent with those of the corresponding real users. However, current LLMs exhibit inherent bias and limitations in such reasoning, failing to infer different types of user behaviors [16, 60]. Therefore, it is necessary to systematically collect behavior-driving factors across different user characteristics and adopt appropriate modeling approaches to effectively capture diverse behavior patterns.", + "bbox": [ + 169, + 752, + 825, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose SocioVerse, a world model for social simulation driven by LLM-based agents based on a large-scale real-world user pool. As shown in Figure 2, we design modular components to address the above questions. The Social Environment injects up-to-date and external real-world information into the simulation. The User Engine and Scenario Engine respectively reconstruct realistic user context and orchestrate the simulation process to align the simulation with", + "bbox": [ + 169, + 842, + 825, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/af88ad03ac5ce1c47c53dd172206932141236f13f7154e620648f8b4af53663e.jpg", + "image_caption": [ + "Figure 2: An illustration of SocioVerse framework involving 4 powerful parts. The social environment provides an updated context for the simulation. During the simulation, the behavior engine takes the simulation setting, user profiles, and social information from the scenario engine, user engine, and social environment, respectively, and generates the results according to the query." + ], + "image_footnote": [], + "bbox": [ + 183, + 97, + 808, + 398 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the real world. Given this rich contextual setup, the Behavior Engine then drives agents to reproduce human behaviors accordingly.", + "bbox": [ + 169, + 508, + 823, + 539 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To support the framework, we construct a user pool of 10 million individuals by collecting real-world social media data to power the user engine. Comparable in scale to the entire populations of Hungary or Greece, this extensive pool enables diverse and large-scale social simulations. 
For any customized simulation task, various sampling strategies can be applied to extract target user groups from the pool to support the simulation process.", + "bbox": [ + 169, + 542, + 823, + 614 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We conduct three simulations using the SocioVerse framework, each differing in research domain, user composition, and social environment: (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey. For each task, we compare the simulation results with real-world situations. Extensive and comprehensive experiments demonstrate that our framework serves as a robust foundation for building standardized and accurate large-scale social simulations. In summary, our key contributions are as follows:", + "bbox": [ + 169, + 619, + 826, + 703 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- SocioVerse: We propose a world model for social simulation comprising four powerful alignment modules, enabling diverse and trustworthy social simulations (as illustrated in Figure 2).", + "- 10M User Pool: A user pool of 10 million individuals, constructed from real-world behavioral data, enables large-scale and diverse social simulations, ranging from small interest groups to large citizen communities.", + "- Three Illustrative Simulations: We demonstrate the framework's capabilities through three distinct scenarios: presidential election prediction, breaking news feedback, and a national economic survey, providing a foundation for future research." + ], + "bbox": [ + 215, + 727, + 826, + 891 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Methods", + "text_level": 1, + "bbox": [ + 171, + 89, + 279, + 104 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall Framework", + "text_level": 1, + "bbox": [ + 171, + 121, + 316, + 135 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The SocioVerse framework follows a structured pipeline to achieve realistic social simulation results, as shown in Figure 2: (1) Social Environment collects updated information and contextual knowledge. Within the simulation environment, (2) User Engine aligns the simulated agents with target users, (3) Scenario Engine aligns the interaction structure with diverse scenarios, and (4) Behavior Engine aligns the behavioral pattern with real-world target groups.", + "bbox": [ + 169, + 148, + 826, + 219 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 Social Environment", + "text_level": 1, + "bbox": [ + 171, + 237, + 351, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Function The social environment provides event-related context to align the simulation environment with real-world conditions. By integrating up-to-date events, social statistics, and preference content into LLM-based agents, it enhances the realism of the simulation and improve agent decision-making.", + "bbox": [ + 169, + 263, + 826, + 306 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Components The social environment should encompass as much real-world social, cultural, and technological context as possible. 
It can be broadly categorized into three types: social structural information, social dynamic information, and personalized context.", + "bbox": [ + 169, + 321, + 823, + 364 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Social Structure: Social structural information provides agents with a rich knowledge base encompassing demographic distributions, cultural norms, urban infrastructures, and collective behavior patterns [57]. This data allows agents to behave in a way that aligns with the typical characteristics of their assigned demographic or geographic profile. For example, by incorporating regional dialect preferences, work-life habits, and common social values, the simulation can more accurately reflect public discourse trends, mobility behaviors, and economic interactions.", + "bbox": [ + 169, + 369, + 825, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Social Dynamics: Social dynamics encompass time-sensitive content continuously generated in the real world, such as news events and policy changes. Typically, this engine maintains an up-to-date event base to continuously collect real-world event news from mainstream news, and all the news articles contain time stamps and event-related tags so that LLM-based agents can comb through the timeline of the events and react accordingly [37].", + "bbox": [ + 169, + 460, + 823, + 531 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Personalized Context: In addition to the macro social environment, individuals also receive different personalized information feeds. Previous studies have explored that the recommendation system can enhance the behavior diversity of the agent [31, 56, 60]. Consequently, the preference content component constructs relevant posts and pushes them to agents according to their social interaction network and interesting topics.", + "bbox": [ + 169, + 536, + 825, + 606 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 User Engine", + "text_level": 1, + "bbox": [ + 171, + 625, + 299, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Function The user engine aligns simulated agents with a rich set of real-world user samples, enabling the creation of complex target users within the simulation.", + "bbox": [ + 169, + 650, + 826, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Components To support diverse user composition and effective user retrieval and description, the user engine incorporates a large user pool and a wide range of user labels.", + "bbox": [ + 169, + 696, + 823, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "User Pools: The user pool is designed to collect extensive digital footprints of individuals across social media platforms, enabling a more comprehensive characterization of real-world behavioral patterns and expression tendencies. To this end, we constructed a user pool covering a variety of social media platforms, including $\\mathbf{X}^1$ and Rednote2. Anomalous data, such as advertising and bot-generated content, is filtered by calculating the post frequency and average text similarity. The detailed procedure can be found in Appendix A. We index users and construct a user pool of 10 million users based on the collected social media posts. Formally, we define user pool as: $UserPool = \\{U_i, P_i \\mid i \\in \\mathbb{S}\\}$ , where the $i$ -th user $U_i$ derives from the collection of social media platforms $\\mathbb{S}$ with his/her related posts $P_i = \\{P_{i,1}, P_{i,2}, \\ldots\\}$ . 
The statistical summary of the user pool is provided in Table 1.", + "bbox": [ + 169, + 729, + 826, + 869 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1https://x.com/", + "bbox": [ + 192, + 883, + 310, + 897 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://www.xiaohongshu.com/", + "bbox": [ + 192, + 897, + 418, + 911 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/7ab3b60a9c281cf0675ca7cbaa1ac451358842d500a4f62bb1f322d99ada9ba4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Source</td><td># Users</td><td># Posts</td></tr><tr><td>X</td><td>1,006,517</td><td>30,195,510</td></tr><tr><td>Rednote</td><td>9,158,404</td><td>40,963,735</td></tr></table>
", + "bbox": [ + 369, + 88, + 625, + 152 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: Statistical summary of the 10M user pool.", + "bbox": [ + 328, + 157, + 665, + 172 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "User Labels: User labels refer to the tagging and description of users, which can be represented using discrete attributes or continuous representation. Demographic descriptions of users are the most commonly used form of labeling. However, they are often not directly accessible. Therefore, we designed a demographic annotation system to infer and label user attributes. The process begins with multiple LLMs serving as initial annotators, classifying users across various demographic dimensions. Human annotators then evaluate and refine the LLM-generated labels, ensuring the reliability of the user tags dataset. The curated dataset is subsequently used to train demographic classifiers, enabling large-scale annotation in a cost-effective manner. Specifically, we annotate users across 15 demographic dimensions: age, gender, vocation, race, income, education, settlement type, region, employment, marital status, religious, party, ideology, BigFive personality, and hobbies. Each attribute is inferred by a specialized classifier trained on the corresponding subset of the user tags dataset. See Appendix B for further details.", + "bbox": [ + 169, + 191, + 826, + 357 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 Scenario Engine", + "text_level": 1, + "bbox": [ + 171, + 373, + 328, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Function The scenario engine aligns various simulation structures with real-world contexts based on specific task formulations and scenario types, and then scales individual simulations by sampling according to demographic distributions provided by the user engine.", + "bbox": [ + 169, + 398, + 823, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Components The scenario engine formulates a wide range of real-world social situations, which can be summarized as archetypal scenario templates, including questionnaires, in-depth interviews, behavior experiments, and social media interaction.", + "bbox": [ + 169, + 455, + 825, + 497 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Questionnaire: The questionnaire scenario constructs the simulation in a 1-to-N manner, with one designed scale or questionnaire answered by multiple target users in a single round. This scenario is suitable for massive social investigation on specific topics, like election polls.", + "bbox": [ + 169, + 503, + 823, + 546 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inddepth Interview: The in-depth interview scenario follows a 1-to-1 structure, where a simulated interviewer engages with an individual target user through multiple interaction rounds [43]. This iterative process allows for probing deeper into responses, clarifying ambiguities, and exploring underlying motivations. Such simulations are particularly useful for qualitative research on user experiences, psychological assessments, and exploratory studies where nuanced responses and detailed reasoning are essential.", + "bbox": [ + 169, + 551, + 825, + 635 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Behavior Experiment: The behavior experiment scenario is typically conducted in a 1-to-N or N-to-N format, depending on whether individual or group interactions are being studied [8, 42]. 
Simulated users are exposed to controlled conditions where their behavioral responses are observed across multiple rounds of interaction. These simulations help researchers examine decision-making processes, social influences, and cognitive biases in various experimental setups, such as consumer behavior studies or cooperative game simulations.", + "bbox": [ + 169, + 641, + 825, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Social Media Interaction: The social media interaction scenario adopts an N-to-N structure, where multiple simulated users engage in dynamic, multi-round exchanges in an online setting [30]. This scenario captures real-time interactions, including content sharing, comment threads, and viral spread dynamics, allowing researchers to analyze public discourse, opinion shifts, and information diffusion on social platforms. It is particularly valuable for studying trends in misinformation, political discussions, and network-based influence propagation.", + "bbox": [ + 169, + 729, + 825, + 814 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.4 Behavior Engine", + "text_level": 1, + "bbox": [ + 171, + 830, + 328, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Function The behavior engine aims to align the behaviors of the agents with that of real users. The behavior engine integrates user history and experience from the user engine, the interaction mechanism from the scenario engine and social context from the social environment to predict the behavior of each individual.", + "bbox": [ + 169, + 854, + 825, + 910 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Components To achieve credible behavior simulation, the behavior engine needs to provide a robust simulation foundation, including traditional agent-based models and a series of LLMs.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Traditional Agent-Based Modeling: Traditional agent-based modeling (ABM) relies on rule-based and mathematical models [9, 23, 32, 47, 52], where interactions among agents are typically realized through the broadcasting of predefined values. These values are derived from heuristic functions or theoretical mathematical formulations. Traditional ABM approaches are highly scalable and computationally efficient, making them well-suited for simulating large populations, especially marginal users with relatively limited influence.", + "bbox": [ + 169, + 126, + 823, + 209 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LLM-powered Agents: LLMs leverage their role-playing capabilities to simulate user-generated content, and the abilities can be activated through various methods [29, 36, 51, 61-64]. Specifically, the behavior engine can be powered by general LLMs, expert LLMs, and domain-specific LLMs. Through non-parametric prompting, powerful general LLMs (e.g., GPT series and Qwen series) can act in accordance with predefined user profiles. Expert and domain-specific LLMs are acquired through parametric training, e.g., continual pretraining, supervised fine-tuning, and reinforcement learning. 
When target users exhibit complex profiles and the simulation requires deep domain expertise, these models are leveraged to enhance the professionalism and accuracy of agent behaviors.", + "bbox": [ + 169, + 215, + 826, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Implementation of Specific Scenarios", + "text_level": 1, + "bbox": [ + 171, + 347, + 514, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We implement three representative social simulation scenarios through the SocioVerse framework based on the implemented components: (a) presidential election prediction of America, (b) breaking news feedback analysis, and (c) national economic survey of China. These scenarios respectively address political communication, journalistic dissemination, and socioeconomic domains, demonstrating the framework's generalizability through standardized implementation pipelines.", + "bbox": [ + 169, + 378, + 823, + 449 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/41b8f7e83330617a874286474fbb177b1c17d9e2e45f3b39659735bbc7457011.jpg", + "image_caption": [ + "Figure 3: Illustration of three scenarios representing (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey." + ], + "image_footnote": [], + "bbox": [ + 181, + 463, + 390, + 580 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9839e0e42e838abe4b61415c929efb4951f849533401bc247dd8cf0d2900ba80.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 401, + 463, + 607, + 580 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5cd70d2f9aa944b30cd8014a3f27307a2828ab5935fadc5daf426be35f3eedea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 463, + 816, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Presidential Election Prediction of America", + "text_level": 1, + "bbox": [ + 171, + 645, + 514, + 659 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Task Description Presidential elections remain central to public engagement and party strategy formation [6, 46]. This study analyzes methods for large-scale election simulation using LLMs through the U.S. presidential system's Electoral College framework. In this indirect voting system, citizens vote for state electors (allocated by congressional representation) who formally elect the president. Most states employ a winner-takes-all allocation of electoral votes to the statewide majority winner, with our modeling focused on predicting these state-level outcomes.", + "bbox": [ + 169, + 671, + 823, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Target Group Distribution Extensive research has documented the influence of demographic factors on election outcomes [33, 53]. We model U.S. demographic and ideological diversity through integrated Census Bureau (2022 voting/registration) and ANES (2020) data [1]. This scenario incorporates 12 attributes from the user engine: socioeconomic (income, education, employment), geographic (region, area), and political (party, ideology) dimensions alongside demographic factors (age, gender, race, marital status, and religious status). 
Given available marginal distributions, we employ iterative proportional fitting (IPF) to synthesize agent populations, see Appendix C.1.", + "bbox": [ + 169, + 770, + 823, + 868 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Questionnaire Design We design the presidential election questionnaire based on abundant polls conducted by various media and research institutes [5, 24], incorporating both significant issues and", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "voter preferences. These elements are then optimized into proper forms for LLM-based agents by the scenario engine. The entire questionnaire can be found in Appendix D.1.", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation Metric Two metrics are used to comprehensively compare the simulated election results to the real-world results. (1) Accuracy rate (Acc) is measured by calculating the proportion of states for which the election simulation results align with the actual result, serving as a coarse-grained evaluation metric. (2) Root Mean Square Error (RMSE) is measured by calculating the simulated vote share and the actual vote share for each state, which serves as a fine-grained evaluation metric.", + "bbox": [ + 169, + 133, + 826, + 204 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 Breaking News Feedback", + "text_level": 1, + "bbox": [ + 171, + 219, + 390, + 234 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Task Description Journalism plays a crucial role in shaping public perception and opinion through agenda-setting, framing, and information dissemination [20, 55]. Online social media platforms have gradually replaced the influence of traditional paper media. When breaking news is released on social media platforms, its potential audience may hold different stances. We take the release of ChatGPT as our target news to evaluate the accuracy and foreseeability of public attitudes.", + "bbox": [ + 169, + 244, + 826, + 316 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Target Group Distribution We define all Rednote users in our pool as the universal set, identifying technology-interested users as the potential audience set $\\mathbb{P}$ , and those discussing ChatGPT via keyword matching as the ground truth set $\\mathbb{G}$ , with $\\mathbb{G} \\subset \\mathbb{P} \\subset UserPool$ . Context is limited to pre-news timeframes to prevent leakage. Using the potential audience distribution as prior, we sample agents with identical distribution sampling (IDS) as $D_{s} = IDS(UserPool, \\mathbb{P})$ , see Appendix C.2), considering demographics (gender, age, education, and consumption level) during sampling the user pools. Based on this, the task is to compare the consistency between the agents' attitudes toward news and those of the users in the ground truth set.", + "bbox": [ + 169, + 328, + 825, + 441 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Questionnaire Design We design the cognitive questionnaire using the ABC attitude model (Affect, Behavior, Cognition) [28], which outlines attitude formation as a hierarchy: cognition affects emotions, guiding behavior. 
Combined with a 5-point Likert scale [22], the questionnaire covers six dimensions: public cognition (PC), perceived risks (PR), perceived benefits (PB), trust (TR), fairness (FA), and public acceptance (PA). See Appendix D.2 for details.", + "bbox": [ + 169, + 452, + 823, + 525 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation Metric Agents from both sets answer the questionnaire for paired responses. Two evaluation dimensions assess feedback: (1) Normalized RMSE (NRMSE) measures point-wise differences between simulated and ground truth answers across PC, PR, PB, TR, FA, and PA as value evaluations; (2) KL-divergence (KL-Div) compares the 6-dimensional answer distributions between groups as consistency evaluations.", + "bbox": [ + 169, + 536, + 823, + 608 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 National Economic Survey of China", + "text_level": 1, + "bbox": [ + 171, + 622, + 464, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Task Description Economic simulation is another crucial part of massive social simulations as it models resource distribution, market dynamics, and financial behaviors, providing insights into economic stability and policy impacts [13, 54]. By integrating economic factors with social interactions, it enhances the prediction of systemic outcomes, guiding decision-making in areas such as governance, urban planning, and crisis management. We follow a national economic survey conducted by the National Bureau of Statistics of China, which interviews Chinese citizens on their monthly spending given the average salary of each province in China.", + "bbox": [ + 169, + 647, + 825, + 747 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Target Group Distribution The prior distribution is based on the methodology from the National Bureau of Statistics of China, which takes 160,000 families nationwide and calculates their incomes and spending as the national average statistics [39]. We sample nationwide agents from our user pool proportionally according to their region population and generate their income distribution according to the regional average income [38]. The detailed method can be referred to in Appendix C.3.", + "bbox": [ + 169, + 758, + 823, + 829 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Questionnaire Design Spending details in China Statistical Yearbook 2024 [40] are categorized into eight parts, i.e. food, clothing, housing, daily necessities & services, communication & transportation, education & entertainment, healthcare, and others. Consequently, the questionnaire design covers the above categories with examples and uses segmented interval options in each question. The entire questionnaire can be referred to in Appendix D.3.", + "bbox": [ + 169, + 842, + 828, + 914 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation Metric Both value evaluation and distribution evaluation are involved in the national economic survey as well. (1) NRMSE of the nine categories is measured between the simulated results and official statistics. 
(2) KL-Div is measured by taking the 8-item spending as a distribution to evaluate the consistency between the simulation and the real world.", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Results", + "text_level": 1, + "bbox": [ + 171, + 167, + 267, + 183 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 SocioVerse Can Support Diverse and Accurate Massive Social Simulations", + "text_level": 1, + "bbox": [ + 169, + 199, + 728, + 214 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7cb07d45660942f4481e7ecfce1c3d40f9a6ce0877b27cea63d127d5b346085f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Scenario | # Agents | # Demographics | Type | Sampling | Source | Language | # Questions | Ground truth
PresElectPredict | 331,836 | 12 | label | IPF | X | EN | 49 | real world
BreakNewsFeed | 20,000 | 7 | label | IDS | rednote | ZH | 18 | calculated
NatEconSurvey | 16,000 | 9 | label+number | IDS | rednote | ZH | 17 | real world
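The Sampling column above lists IPF and IDS. As a companion to the description in Section 3.2, the following is a minimal illustrative sketch (not the released code) of identical distribution sampling (IDS): agents are drawn from the user pool so that their joint demographics follow a target distribution. The helper names (`user_pool`, `target_joint`) and the cell-then-user sampling order are assumptions.

```python
import random
from collections import defaultdict

def ids_sample(user_pool, target_joint, attrs, n_agents, seed=0):
    """Draw n_agents users whose joint demographics follow target_joint.

    user_pool    -- list of user dicts carrying demographic labels
    target_joint -- dict: tuple of attribute values -> probability (sums to 1)
    attrs        -- ordered attribute names defining the joint key,
                    e.g. ("gender", "age", "education", "consumption")
    """
    rng = random.Random(seed)
    buckets = defaultdict(list)                     # users grouped by demographic cell
    for user in user_pool:
        buckets[tuple(user[a] for a in attrs)].append(user)

    cells = [c for c in target_joint if buckets.get(c)]    # ignore empty cells
    weights = [target_joint[c] for c in cells]
    agents = []
    for _ in range(n_agents):
        cell = rng.choices(cells, weights=weights, k=1)[0]  # pick a cell by its probability
        agents.append(rng.choice(buckets[cell]))            # then a user within that cell
    return agents
```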
", + "bbox": [ + 173, + 232, + 823, + 296 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Experiment Settings We select powerful LLMs from different model families. For open-sourced models, we select Llama-3-70b-Instruct [14], Qwen2.5-72b-Instruct [59], DeepSeek-R1-671b [19], and DeepSeek-V3 [27]. For commercial models, we select GPT-4o $^3$ [41] and GPT-4o-mini $^4$ .", + "bbox": [ + 169, + 362, + 826, + 405 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare the settings of all three scenarios for better understanding, which is shown in Table 2. As the Presidential Election Prediction covers a 1-in-1,000 sample of the U.S. population, GPT-4o is excluded from comparison due to cost constraints. In terms of local model serving, Qwen2.5-72b-Instruct and Llama3-70b-Instruct models are both deployed on 8 NVIDIA RTX4090 GPUs via vLLM [25]. We set max tokens to 2,048 for all models to enable chain-of-thoughts during the generation and the temperature is set to 0.7 to encourage diversity. Implementation details for user pool construction and demographics annotation can be found in Appendix A and B.", + "bbox": [ + 169, + 410, + 825, + 508 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c8315fc00f5be85637036238e2bbe85316d8d30a637370c74716c310a2613d7d.jpg", + "table_caption": [ + "Table 2: Detail settings of three simulation scenarios, where PresElectPredict, BreakNewsFeed, and NatEconSurvey denote three simulations mentioned in the paper, respectively. IPF and IDS denote iterative proportional fitting and identical distribution sampling, see Appendix C." + ], + "table_footnote": [], + "table_body": "
Model | PresElectPredict Overall Acc↑ | PresElectPredict Overall RMSE↓ | PresElectPredict Battleground Acc↑ | PresElectPredict Battleground RMSE↓ | BreakNewsFeed KL-Div↓ | BreakNewsFeed RMSE↓ | NatEconSurvey Overall KL-Div↓ | NatEconSurvey Overall RMSE↓ | NatEconSurvey Developed-region KL-Div↓ | NatEconSurvey Developed-region RMSE↓
Llama3-70b | 0.843 | 0.064 | 0.733 | 0.045 | 0.668 | 0.199 | 0.016 | 0.026 | 0.013 | 0.025
Qwen2.5-72b | 0.922 | 0.037 | 0.800 | 0.031 | 0.113 | 0.059 | 0.066 | 0.048 | 0.043 | 0.039
DeepSeek-R1-671b | \ | \ | 0.670 | 0.065 | 0.383 | 0.082 | 0.059 | 0.045 | 0.045 | 0.036
DeepSeek-V3 | 0.922 | 0.046 | 0.867 | 0.041 | 0.263 | 0.072 | 0.035 | 0.036 | 0.023 | 0.030
GPT-4o-mini | \ | \ | 0.800 | 0.039 | 0.195 | 0.114 | 0.046 | 0.045 | 0.030 | 0.036
GPT-4o | \ | \ | \ | \ | 0.196 | 0.055 | 0.062 | 0.051 | 0.036 | 0.038
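The metrics reported above follow the textual definitions in Section 3 (Acc over states, RMSE on vote shares, NRMSE on per-category values, and KL-Div over answer or spending distributions). A minimal NumPy sketch of these metrics is given below; the exact normalization used for NRMSE is not specified in the text, so the range-based normalization here is an assumption.

```python
import numpy as np

def state_accuracy(pred_winner, true_winner):
    """Acc: share of states whose simulated winner matches the real outcome."""
    states = list(true_winner)
    return sum(pred_winner[s] == true_winner[s] for s in states) / len(states)

def rmse(pred, true):
    """RMSE between simulated and real vote (or spending) shares."""
    pred, true = np.asarray(pred, dtype=float), np.asarray(true, dtype=float)
    return float(np.sqrt(np.mean((pred - true) ** 2)))

def nrmse(pred, true):
    """RMSE normalized by the ground-truth range (assumed normalization)."""
    true = np.asarray(true, dtype=float)
    return rmse(pred, true) / (true.max() - true.min() + 1e-12)

def kl_divergence(p, q, eps=1e-12):
    """KL(P || Q) between simulated and real answer/spending distributions."""
    p = np.asarray(p, dtype=float) + eps
    q = np.asarray(q, dtype=float) + eps
    p, q = p / p.sum(), q / q.sum()
    return float(np.sum(p * np.log(p / q)))
```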
", + "bbox": [ + 173, + 521, + 823, + 656 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Overall results of the three scenarios, where subset Battleground indicates battleground states in the U.S. in the presidential election and subset Developed-Region indicates top-10 developed regions in China in terms of GDP.", + "bbox": [ + 169, + 659, + 823, + 702 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results The overall simulation results of the three scenarios are shown in Table 3. We also report subset results for presidential election prediction and national economic survey.", + "bbox": [ + 169, + 720, + 823, + 750 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "- Presidential Election Prediction We report the overall results and the battleground states' results separately. The prediction of battleground states is challenging even in the real world and thus becomes the focus during the election process. According to the results, GPT-4o-mini and Qwen2.5-72b show competitive performance both in Acc and RMSE. Typically, according to the winner-takes-all rule, over $90\\%$ state voting results are predicted correctly, which means the simulation achieves a high-precision macroscopic reduction of the real-world election results. After the case study, we find that DeepSeek-R1-671b sometimes falls into overthinking, resulting in less accurate results.", + "bbox": [ + 215, + 760, + 826, + 872 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "3gpt-4o-2024-08-06", + "bbox": [ + 192, + 883, + 334, + 897 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "4gpt-4o-mini-2024-07-18", + "bbox": [ + 192, + 896, + 372, + 911 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Breaking News Feedback The results measure the overall consistency of each model compared with the real-world users' reactions and attitudes. To this end, the performances of GPT-4o and Qwen2.5-72b are more aligned with real-world perspectives than other models in terms of KL-Div and NRMSE, respectively, and the following detailed analysis will demonstrate that the models consistently capture and accurately predict public trends and opinions.", + "- National Economic Survey We report the overall results and results for the top 10 regions by GDP (i.e., developed regions) separately. Generally, all the models closely align with real-world statistics. Llama3-70b shows a significant superiority over other models in the economic survey scenario and all the models perform better in the 1st-Region subset than overall. The results demonstrate that individuals' spending habits can be accurately reproduced under the SocioVerse framework, especially in developed regions." + ], + "bbox": [ + 215, + 90, + 826, + 266 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The overall results from both value evaluation and distribution evaluation of three simulations sufficiently prove that SocioVerse can support diverse and accurate massive social simulations with a standard pipeline and minimal changes with human experts in the loop. 
However, the choice of underlying LLMs can affect simulation precision across different scenarios, highlighting the need for further study.", + "bbox": [ + 169, + 275, + 826, + 347 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Prior Distribution and Real-World Knowledge Can Enhance Simulation Accuracy in Presidential Election Predictions", + "text_level": 1, + "bbox": [ + 169, + 362, + 805, + 393 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/1c254baddeee82d257854c6de8cf0642cfab2a1bf3f197f0a8fb52bcd38fa176.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Acc↑ | RMSE↓
Llama3-70b | 0.733 | 0.045
- w/o Knowledge | 0.533 | 0.051
- w/o Knowledge & Prior Distribution | 0.600 | 0.386
Qwen2.5-72b | 0.800 | 0.031
- w/o Knowledge | 0.800 | 0.033
- w/o Knowledge & Prior Distribution | 0.600 | 0.370
GPT-4o-mini | 0.800 | 0.039
- w/o Knowledge | 0.800 | 0.052
- w/o Knowledge & Prior Distribution | 0.667 | 0.323
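To make the two ablation conditions concrete, the sketch below shows one hypothetical way an agent persona prompt could be assembled: omitting `past_posts` corresponds to the "- w/o Knowledge" rows, and additionally drawing `profile` from a random rather than the prior demographic distribution corresponds to "- w/o Knowledge & Prior Distribution". This is illustrative only and is not the prompt used in the paper; the actual questionnaire is given in Appendix D.1.

```python
def build_agent_prompt(profile, past_posts=None, use_knowledge=True):
    """Hypothetical persona prompt for one simulated voter (illustrative only).

    profile    -- dict of demographic labels, sampled from the prior distribution
                  (or randomized in the '- w/o Prior Distribution' condition)
    past_posts -- the user's historical posts; omitted in '- w/o Knowledge'
    """
    lines = ["You are role-playing a U.S. voter with the following profile:"]
    lines += [f"- {key}: {value}" for key, value in profile.items()]
    if use_knowledge and past_posts:
        lines.append("Some of your recent social media posts:")
        lines += [f'* "{post}"' for post in past_posts[:5]]
    lines.append("Answer the poll questions below consistently with this persona.")
    return "\n".join(lines)
```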
", + "bbox": [ + 336, + 407, + 661, + 573 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Ablation experiment results on the presidential election prediction simulation, where -w/o Knowledge denotes without real-world user knowledge and -w/o Piror Distribution denotes using random demographics distribution.", + "bbox": [ + 169, + 578, + 823, + 621 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We conduct an ablation study on the presidential election prediction simulation to assess the impact of prior demographics distribution and real-world user knowledge. As shown in Table 4, prior demographics distribution significantly improves the accuracy of the simulation in both Acc and RMSE compared to random demographics distribution. Additionally, past posts from users on social media platforms improve the fine-grained performance, especially for Llama3-70b in Acc and all the models in RMSE. We can tell from the ablation study that both prior distribution and real-world knowledge in the SocioVerse pipeline are significant during the simulation.", + "bbox": [ + 169, + 632, + 826, + 729 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 Group Preference and Perspectives Can Be Well Reflected in Breaking News Feedback", + "text_level": 1, + "bbox": [ + 169, + 747, + 816, + 763 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "During the Breaking News Feedback simulation, the core concern is whether the preferences and perspectives of the target group are well captured and reflected in the results. We reformulate the original questionnaire into the Likert 6-dimension scale ranging from 1 to 5 points, representing from totally disagree to totally agree. As the ground truth of the simulation is calculated by prompting LLM agents from the ground truth set, the simulated and real results are paired for each model, as shown in Figure 4. All the models powered by the potential audience set during the simulation tend to behave consistently with the ground truth users. However, Llama3-70b perform poorly with a larger gap between the simulated and real results than other models. GPT-4o-mini shows different attitudes in the fairness (FA) and public acceptance (PA) dimensions, which may be because the news is related to OpenAI. Another trend indicates that, generally, all the models perform more", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f30fe370c7d43ea99207a6f091595cf5559b34c28ba881f238d60524e7804d01.jpg", + "image_caption": [ + "Figure 4: An illustration of the performances of the breaking news feedback simulation, where PC, PR, PB, TR, FA, and PA denote six dimensions from the Likert scale (see §3.2 questionnaire design), with 1-point standing for totally disagree and 5-point for totally agree." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 88, + 823, + 189 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "disagreeably in the simulated results than the real results, which also underlines the potential risk of biases during the public opinion simulation.", + "bbox": [ + 169, + 268, + 823, + 297 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.4 The Capabilities of LLMs Vary in Different Domains in National Economic Survey", + "text_level": 1, + "bbox": [ + 169, + 315, + 789, + 330 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/9971298eafb0097555473200ebe95969169d99fbf1d407eea813867188612681.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Item | Llama3-70b | Qwen2.5-72b | GPT-4o-mini | GPT-4o | DeepSeek-R1
Daily | 0.007 | 0.009 | 0.006 | 0.010 | 0.009
Clothing | 0.012 | 0.015 | 0.019 | 0.015 | 0.015
Transportation_Communication | 0.016 | 0.020 | 0.027 | 0.023 | 0.017
Education_Entertainment | 0.018 | 0.022 | 0.024 | 0.017 | 0.022
Medical | 0.023 | 0.062 | 0.041 | 0.057 | 0.060
Food | 0.037 | 0.031 | 0.031 | 0.040 | 0.032
Household | 0.052 | 0.110 | 0.107 | 0.120 | 0.102
Others | 0.008 | 0.008 | 0.010 | 0.005 | 0.009
", + "bbox": [ + 238, + 349, + 756, + 474 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 5: Detailed results on the national economic survey simulation reported in NRMSE, where the Item column indicates the components of spending. The best results are **bolded*; the second-best results are underlined.", + "bbox": [ + 169, + 479, + 823, + 521 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The simulation of the national economic survey covers 8 spending dimensions. The overall results in Table 3 show the average performance of these dimensions, while model performances among these dimensions can also vary. We calculate the averaged NRMSE of 31 regions on each spending level, as shown in Table 5. It is worth mentioning that all the models show high consistency. Eliminating the others item, all the models perform best on daily necessities spending planning and worst on housing spending, which can reveal the LLM's preference on the economic decision-making and highlight the challenge in housing spending strategy.", + "bbox": [ + 169, + 531, + 825, + 630 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Discussion", + "text_level": 1, + "bbox": [ + 171, + 650, + 294, + 667 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this study, we introduce a generalized social simulation framework SocioVerse and evaluated its performance across three distinct real-world scenarios. Our findings indicate that state-of-the-art LLMs demonstrate a notable ability to simulate human responses in complex social contexts, although some gaps still remain between the simulated response and observed real-world outcomes. Therefore, future research may need to incorporate a broader range of scenarios and develop more fine-grained evaluations built upon the current analytic engine, to further explore and expand the boundaries of LLMs' simulation capabilities. Such efforts could pave the way for establishing LLMs as comprehensive and reliable tools for large-scale social simulation.", + "bbox": [ + 169, + 683, + 826, + 794 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We observed several key patterns across the simulations of the scenarios. First, incorporating demographic distributions and users' historical experiences significantly improved simulation accuracy. These findings highlight the importance of building a large, demographically rich user pool, complemented by a multi-dimensional user tagging system for more precise modeling of group-specific behaviors. Second, under consistent measurement protocols, LLMs produced broadly similar simulations of human attitudes and ideologies. However, certain models, such as GPT-4o-mini, showed notable inconsistencies, indicating that model-specific preferences or biases remain influential and warrant closer scrutiny in future work. 
Finally, we found that while LLMs perform well in simple daily", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "scenarios, they underperform in complex situations requiring contextual knowledge, underscoring the need to align model behavior with real-world experiences and social contexts.", + "bbox": [ + 174, + 90, + 823, + 119 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Notably, the current version has only implemented part of our framework, indicating significant potential for enhancing the accuracy and quality of social simulations. Future work can focus on refining each module for better collaboration, enabling the framework to achieve its full potential. For instance, the incorporation of the social environment can inject up-to-date knowledge into LLMs, enhancing the understanding of social dynamics. The scenario engine can not only provide survey-based simulation but also expand to diverse formats such as social interviews and free interactions. Additionally, further optimization of the general LLMs and expert LLMs adaptation in the behavior engine will enable better accommodation of complex target user groups, such as minority groups and individuals with special disabilities. The analysis engine can introduce an autonomous planning module to improve the overall credibility of simulation results.", + "bbox": [ + 174, + 125, + 826, + 263 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Beyond the social simulation framework, our work underscores the potential to bridge the gap between autonomous AI systems and traditional social science, offering social scientists a seamless, cost-effective tool for conducting social experiments with minimal setup. Such tools not only help analyze and validate psychological and sociological theories or hypotheses, such as behavioral economics and social identity theory, but also assist in predicting large-scale social impacts like policy changes, social movements, or public health crises. By providing an efficient and scalable simulation environment, our framework is not just a research tool, but an experimental platform for exploring the dynamic changes and long-term trends of virtual societies, with the aim of becoming a realistic mapping for real-world societies.", + "bbox": [ + 174, + 270, + 826, + 395 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 174, + 414, + 330, + 431 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We would like to express our sincere gratitude to Professor Rongwei Chu and his research team for their invaluable support in this work. The project's computational resources are supported by the CFFF platform of Fudan University.", + "bbox": [ + 174, + 445, + 823, + 488 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] American National Election Studies. Anes 2020 time series study full release [dataset and documentation], 2021. February 10, 2022 version.", + "[2] J. R. Anthis, R. Liu, S. M. Richardson, A. C. Kozlowski, B. Koch, J. Evans, E. Brynjolfsson, and M. Bernstein. 
LIm social simulations are a promising research method. arXiv preprint arXiv:2504.02234, 2025.", + "[3] L. P. Argyle, E. C. Busby, N. Fulda, J. R. Gubler, C. Rytting, and D. Wingate. Out of one, many: Using language models to simulate human samples. Political Analysis, 31(3):337-351, 2023.", + "[4] Z. Bao, Q. Liu, Y. Guo, Z. Ye, J. Shen, S. Xie, J. Peng, X. Huang, and Z. Wei. Piers: Personalized intelligent outpatient reception based on large language model with multi-agents medical scenario simulation. arXiv preprint arXiv:2411.13902, 2024.", + "[5] A. Barnett and A. Sarfati. The polls and the us presidential election in 2020.... and 2024. Statistics and Public Policy, 10(1):2199809, 2023.", + "[6] L. M. Bartels. Uninformed votes: Information effects in presidential elections. American journal of political science, pages 194-230, 1996.", + "[7] I. Beltagy, M. E. Peters, and A. Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020.", + "[8] A. K. Chandra, D. C. Kozen, and L. J. Stockmeyer. Alternation. Journal of the Association for Computing Machinery, 28(1):114-133, 1981.", + "[9] Y.-S. Chuang and T. T. Rogers. Computational agent-based models in opinion dynamics: A survey on social simulations and empirical studies. arXiv preprint arXiv:2306.03446, 2023.", + "[10] V. Cologna, N. G. Mede, S. Berger, J. Besley, C. Brick, M. Joubert, E. W. Maibach, S. Mihelj, N. Oreskes, M. S. Schäfer, et al. Trust in scientists and their role in society across 68 countries. Nature Human Behaviour, pages 1–18, 2025.", + "[11] T. Connolly. Micromotives and macrobehavior., 1979.", + "[12] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2019.", + "[13] F. Dignum, V. Dignum, P. Davidsson, A. Ghorbani, M. van der Hurk, M. Jensen, C. Kammler, F. Lorig, L. G. Ludescher, A. Melchior, et al. Analysing the combined health, social and economic impacts of the coronavirus pandemic using agent-based social simulation. *Minds and Machines*, 30:177–194, 2020.", + "[14] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "[15] C. Gao, X. Lan, N. Li, Y. Yuan, J. Ding, Z. Zhou, F. Xu, and Y. Li. Large language models empowered agent-based modeling and simulation: A survey and perspectives. *Humanities and Social Sciences Communications*, 11(1):1-24, 2024.", + "[16] C. Gao, X. Lan, Z. Lu, J. Mao, J. Piao, H. Wang, D. Jin, and Y. Li. S3: Social-network simulation system with large language model-empowered agents. arXiv preprint arXiv:2307.14984, 2023.", + "[17] S. Giorgi, V. E. Lynn, K. Gupta, F. Ahmed, S. Matz, L. H. Ungar, and H. A. Schwartz. Correcting sociodemographic selection biases for population prediction from social media. In Proceedings of the International AAAI Conference on Web and Social Media, volume 16, pages 228-240, 2022.", + "[18] B. E. GOLDSMITH, Y. HORIUCHI, and K. MATUSH. Does public diplomacy sway foreign public opinion? identifying the effect of high-level visits. American Political Science Review, 115(4):1342-1357, 2021.", + "[19] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[20] B. Gómez-Calderón and Y. Ceballos. 
Journalism and artificial intelligence: the treatment of the chatbots in the Spanish press. index.comunicación, 14(1):281–300, Jan. 2024." + ], + "bbox": [ + 173, + 112, + 825, + 911 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[21] J. C. Jackson, D. Rand, K. Lewis, M. I. Norton, and K. Gray. Agent-based modeling: A guide for social psychologists. Social Psychological and Personality Science, 8(4):387-395, 2017.", + "[22] A. Joshi, S. Kale, S. Chandel, and D. K. Pal. Likert scale: Explored and explained. British journal of applied science & technology, 7(4):396-403, 2015.", + "[23] M. Jusup, P. Holme, K. Kanazawa, M. Takayasu, I. Romić, Z. Wang, S. Geček, T. Lipić, B. Podobnik, L. Wang, et al. Social physics. Physics Reports, 948:1-148, 2022.", + "[24] S. Keeter, N. Hatley, A. Lau, and C. Kennedy. What 2020's election poll errors tell us about the accuracy of issue polling. Pew Research Center Methods, 2021.", + "[25] W. Kwon, Z. Li, S. Zhuang, Y. Sheng, L. Zheng, C. H. Yu, J. E. Gonzalez, H. Zhang, and I. Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023.", + "[26] S. Lee, T.-Q. Peng, M. H. Goldberg, S. A. Rosenthal, J. E. Kotcher, E. W. Maibach, and A. Leiserowitz. Can large language models capture public opinion about global warming? an empirical assessment of algorithmic fidelity and bias. arXiv preprint arXiv:2311.00217, 2023.", + "[27] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024.", + "[28] B. Liu, Y. Xu, Y. Yang, and S. Lu. How public cognition influences public acceptance of ccus in china: Based on the abc (affect, behavior, and cognition) model of attitudes. Energy Policy, 156:112390, 2021.", + "[29] X. Liu, S. Yang, X. Zhang, H. Kuang, L. Sun, Y. Yang, S. Chen, X. Huang, and Z. Wei. Ai-press: A multi-agent news generating and feedback simulation system powered by large language models. arXiv preprint arXiv:2410.07561, 2024.", + "[30] Y. Liu, X. Chen, X. Zhang, X. Gao, J. Zhang, and R. Yan. From skepticism to acceptance: Simulating the attitude dynamics toward fake news. arXiv preprint arXiv:2403.09498, 2024.", + "[31] H. Lyu, S. Jiang, H. Zeng, Y. Xia, Q. Wang, S. Zhang, R. Chen, C. Leung, J. Tang, and J. Luo. Llm-rec: Personalized recommendation via prompting large language models. arXiv preprint arXiv:2307.15780, 2023.", + "[32] C. M. Macal and M. J. North. Agent-based modeling and simulation. In Proceedings of the 2009 winter simulation conference (WSC), pages 86-98. IEEE, 2009.", + "[33] B. Major, A. Blodorn, and G. Major Blascovich. The threat of increasing diversity: Why many white americans support trump in the 2016 presidential election. Group Processes & Intergroup Relations, 21(6):931-940, 2018.", + "[34] X. Mou, X. Ding, Q. He, L. Wang, J. Liang, X. Zhang, L. Sun, J. Lin, J. Zhou, X. Huang, et al. From individual to society: A survey on social simulation driven by large language model-based agents. arXiv preprint arXiv:2412.03563, 2024.", + "[35] X. Mou, Z. Li, H. Lyu, J. Luo, and Z. Wei. Unifying local and global knowledge: Empowering large language models as political experts with knowledge graphs. 
In Proceedings of the ACM Web Conference 2024, pages 2603–2614, 2024.", + "[36] X. Mou, J. Liang, J. Lin, X. Zhang, X. Liu, S. Yang, R. Ye, L. Chen, H. Kuang, X. Huang, and Z. Wei. Agentsense: Benchmarking social intelligence of language agents through interactive scenarios, 2024.", + "[37] X. Mou, Z. Wei, and X. Huang. Unveiling the truth and facilitating change: Towards agent-based large-scale social movement simulation. arXiv preprint arXiv:2402.16333, 2024.", + "[38] NBS China. Communiqué of the Seventh National Population Census of the People's Republic of China. Technical report, 2023. Accessed: 2025-02-14.", + "[39] NBS China. Explanatory Notes on Main Statistical Indicators – Population, Society, and Labor (China Statistical Yearbook 2023), 2023. Accessed: 2025-02-14.", + "[40] NBS China. China Statistical Yearbook 2024, 2024. Accessed: 2025-02-14.", + "[41] OpenAI. GPT-4o System Card. Technical report, 2024. Accessed: 2025-02-14." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[42] J. S. Park, J. O'Brien, C. J. Cai, M. R. Morris, P. Liang, and M. S. Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023.", + "[43] J. S. Park, C. Q. Zou, A. Shaw, B. M. Hill, C. Cai, M. R. Morris, R. Willer, P. Liang, and M. S. Bernstein. Generative agent simulations of 1,000 people. arXiv preprint arXiv:2411.10109, 2024.", + "[44] L. Peisakhin, N. Stoop, and P. Van der Windt. Who hosts? the correlates of hosting the internally displaced. American Political Science Review, pages 1-16, 2024.", + "[45] F. Ribeiro, L. Henrique, F. Benevenuto, A. Chakraborty, J. Kulshrestha, M. Babaei, and K. Gummadi. Media bias monitor: Quantifying biases of social media news outlets at large-scale. In Proceedings of the International AAAI Conference on Web and Social Media, volume 12, 2018.", + "[46] S. J. Rosenstone. Forecasting presidential elections. 1981.", + "[47] T. C. Schelling. Models of segregation. The American economic review, 59(2):488-493, 1969.", + "[48] T. C. Schelling. Dynamic models of segregation. Journal of mathematical sociology, 1(2):143-186, 1971.", + "[49] Y. Shao, L. Li, J. Dai, and X. Qiu. Character-llm: A trainable agent for role-playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 13153–13187, 2023.", + "[50] E. R. Smith and F. R. Conrey. Agent-based modeling: A new approach for theory building in social psychology. *Personality and social psychology review*, 11(1):87-104, 2007.", + "[51] L. Sun, S. Wang, X. Huang, and Z. Wei. Identity-driven hierarchical role-playing agents. arXiv preprint arXiv:2407.19412, 2024.", + "[52] S. Tang. Idea, action, and outcome. Innovation in the Social Sciences, 2(2):123-170, 2024.", + "[53] R. A. Teixeira. Red, blue, and purple America: the future of election demographics. Rowman & Littlefield, 2009.", + "[54] T. Trimborn, P. Otte, S. Cramer, M. Beikirch, E. Pabich, and M. Frank. Subcemm: A simulator for agent-based computational economic market models. Computational economics, 55(2):707-744, 2020.", + "[55] A. van Dalen. Revisiting the algorithms behind the headlines. how journalists respond to professional competition of generative ai. 
Journalism Practice, pages 1-18, 2024.", + "[56] L. Wang, J. Zhang, H. Yang, Z. Chen, J. Tang, Z. Zhang, X. Chen, Y. Lin, R. Song, W. X. Zhao, et al. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552, 2023.", + "[57] K. Wu, X. Mou, L. Xue, Z. Ying, W. Wang, Q. Zhang, X.-J. Huang, and Z. Wei. Pasum: A pre-training architecture for social media user modeling based on text graph. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 12644-12656, 2024.", + "[58] B. Xiao, Z. Yin, and Z. Shan. Simulating public administration crisis: A novel generative agent-based simulation system to lower technology barriers in social science research. arXiv preprint arXiv:2311.06957, 2023.", + "[59] A. Yang, B. Yang, B. Zhang, B. Hui, B. Zheng, B. Yu, C. Li, D. Liu, F. Huang, H. Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "[60] Z. Yang, Z. Zhang, Z. Zheng, Y. Jiang, Z. Gan, Z. Wang, Z. Ling, J. Chen, M. Ma, B. Dong, et al. Oasis: Open agents social interaction simulations on one million agents. arXiv preprint arXiv:2411.11581, 2024.", + "[61] R. Ye, Y. Zhang, Y. Zhang, H. Kuang, Z. Wei, and P. Sun. Multi-agent kto: Reinforcing strategic interactions of large language model in language game. arXiv preprint arXiv:2501.14225, 2025.", + "[62] S. Yue, S. Wang, W. Chen, X. Huang, and Z. Wei. Synergistic multi-agent framework with trajectory learning for knowledge-intensive tasks. arXiv preprint arXiv:2407.09893, 2024.", + "[63] X. Zhang, H. Kuang, X. Mou, H. Lyu, K. Wu, S. Chen, J. Luo, X. Huang, and Z. Wei. SoMeLVLM: A large vision language model for social media processing. In L.-W. Ku, A. Martins, and V. Srikumar, editors, Findings of the Association for Computational Linguistics ACL 2024, pages 2366-2389, Bangkok, Thailand and virtual meeting, Aug. 2024. Association for Computational Linguistics.", + "[64] X. Zhang, J. Lin, L. Sun, W. Qi, Y. Yang, Y. Chen, H. Lyu, X. Mou, S. Chen, J. Luo, et al. Electionsim: Massive population election simulation powered by large language model driven agents. arXiv preprint arXiv:2410.20746, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 910 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A Data Cleaning Details", + "text_level": 1, + "bbox": [ + 171, + 89, + 393, + 107 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 Content Data Extraction", + "text_level": 1, + "bbox": [ + 171, + 119, + 388, + 133 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We extract only post-related content on all the social media platforms to avoid violating privacy policies. Specifically, the data list on each platform is shown in Table 6.", + "bbox": [ + 169, + 145, + 823, + 174 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/9ab297241e7b5ad5640f2f7c89ea694c7a2b607437aa0355f16e8a8ee7812b35.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Platform | Data list
X | user ID, tweet, #likes, #comments, #retweets
Rednote | user ID, notes, #likes, #comments
", + "bbox": [ + 336, + 185, + 658, + 263 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 6: Data list for each social media platform during the data collection.", + "bbox": [ + 250, + 267, + 743, + 282 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.2 Abnormal Data Filtering", + "text_level": 1, + "bbox": [ + 171, + 300, + 393, + 316 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We filter the abnormal data to guarantee the quality through text similarity calculation. Typically, all the textual content from the same user is calculated by means of the word repetition ratio. The threshold is set to 0.3. If the ratio surpasses the threshold, the user is considered likely to be a robot or advertising and will be filtered.", + "bbox": [ + 169, + 325, + 826, + 383 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 934, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Demographics Annotation System", + "text_level": 1, + "bbox": [ + 171, + 89, + 493, + 107 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1 LLM Annotation", + "text_level": 1, + "bbox": [ + 171, + 119, + 336, + 133 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To save costs, we first sample a subset of the user pool and employ multiple power LLMs for annotation. Due to the long time span of this work, users from different data sources in the user pool have used the powerful LLMs available at the time. For users derived from the X, GPT-4o $^5$ , Claude3.5-Sonnet $^6$ , and Gemini-1.5 $^7$ are employed. For users derived from the Rednote, GPT-4o, Cluade3.5-Sonnet, and Qwen2.5-72b are employed.", + "bbox": [ + 169, + 145, + 826, + 215 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2 Human Evaluation", + "text_level": 1, + "bbox": [ + 171, + 231, + 349, + 244 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We employ 7 professional human annotators to verify the results annotated by LLMs. Typically, each annotator is required to re-associate the demographic factors without the LLM labels. All the data are verified by at least 2 human annotators. The overall consistency between humans and LLMs is shown in Table 7.", + "bbox": [ + 169, + 256, + 823, + 311 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/95378075e345f87be207b28094b42ea42f66abffcb8dd1545b7c107a69d3e97d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Models | Human (X) | Human (Rednote)
GPT-4o | 0.905 | 0.723
Claude3.5 | 0.901 | 0.659
Gemini-1.5 | 0.713 | \
Qwen2.5 | \ | 0.846
Majority votes | 0.956 | 0.849
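The "Majority votes" row aggregates the individual LLM annotations before comparing them with the human-verified labels. A minimal sketch of this aggregation and of the consistency score is shown below; the data layout (`llm_labels` as a model-to-labels mapping) is an assumption for illustration.

```python
from collections import Counter

def majority_vote(labels):
    """Majority label across LLM annotators for one user and one attribute."""
    return Counter(labels).most_common(1)[0][0]

def consistency(predicted, human):
    """Fraction of users for which a label source agrees with the human annotation."""
    return sum(p == h for p, h in zip(predicted, human)) / len(human)

def majority_consistency(llm_labels, human):
    """Consistency of majority-voted labels; llm_labels: {model: [label per user]}."""
    n = len(human)
    voted = [majority_vote([llm_labels[m][i] for m in llm_labels]) for i in range(n)]
    return consistency(voted, human)
```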
", + "bbox": [ + 305, + 321, + 692, + 438 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.3 Classifier Training", + "text_level": 1, + "bbox": [ + 171, + 491, + 346, + 507 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We take the majority-voted labels from different LLMs to construct the training dataset. Considering the difference in mainstream language used on different platforms, we employ LongFormer [7] for X data and employ Bert-base-chinese [12] for Rednote. The implementation details are shown in Table 8.", + "bbox": [ + 169, + 516, + 823, + 571 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/2e6b8fa3fd5779995f19c798aae290400cdd80c75f2319f40980aedd30481058.jpg", + "table_caption": [ + "Table 7: Human annotators' verification results. We report the consistency between humans and different LLMs." + ], + "table_footnote": [], + "table_body": "
Params | LongFormer | Bert-base-chinese
train_size | 10,000 | 10,000
# classifiers | 5 | 4
max_tokens | 4096 | 512
learning_rate | 5e-5 | 5e-5
batch_size | 16 | 32
optimizer | AdamW | AdamW
epochs | 3 | 10
device | 8*4090 | 2*4090
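A minimal Hugging Face fine-tuning sketch matching the settings above is shown below, assuming the `allenai/longformer-base-4096` and `bert-base-chinese` checkpoints and integer-encoded demographic labels; it illustrates the setup rather than reproducing the authors' training code.

```python
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

def train_demographic_classifier(texts, labels, num_labels, chinese=False):
    """Fine-tune one demographic classifier with the listed hyperparameters (sketch).

    texts  -- concatenated posts per user; labels -- integer-encoded class per user.
    """
    name = "bert-base-chinese" if chinese else "allenai/longformer-base-4096"
    max_len = 512 if chinese else 4096
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=num_labels)

    # Tokenize once and build a simple list-of-dicts dataset for Trainer.
    enc = tokenizer(texts, truncation=True, padding=True, max_length=max_len)
    train_set = [{"input_ids": enc["input_ids"][i],
                  "attention_mask": enc["attention_mask"][i],
                  "labels": labels[i]} for i in range(len(labels))]

    args = TrainingArguments(
        output_dir="demographic_classifier",
        learning_rate=5e-5,
        per_device_train_batch_size=32 if chinese else 16,
        num_train_epochs=10 if chinese else 3,
        optim="adamw_torch",            # AdamW optimizer, as in Table 8
    )
    Trainer(model=model, args=args, train_dataset=train_set).train()
    return tokenizer, model
```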
", + "bbox": [ + 305, + 582, + 692, + 747 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 8: Implementation details for demographic classifiers.", + "bbox": [ + 299, + 751, + 694, + 766 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We report the performances of demographic classifiers on each demographic factor in Table 9.", + "bbox": [ + 169, + 776, + 790, + 791 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.4 Overall Distribution of the User Pool", + "text_level": 1, + "bbox": [ + 171, + 806, + 472, + 821 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We employ the demographic classifiers to annotate all of the users in the user pool, and the overall distributions are shown in Figure 5. For other demographics in specific simulations that are not", + "bbox": [ + 169, + 832, + 823, + 861 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "5 gpt-4o-2024-08-06", + "bbox": [ + 192, + 869, + 334, + 883 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "6claude-3-5-sonnet-20240620", + "bbox": [ + 192, + 883, + 401, + 896 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "7 gemini-1.5-pro", + "bbox": [ + 192, + 896, + 290, + 911 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/b3698fd8a7cc0afbf47a6e31691ed9fd5974990404ef71422f83c8297bb7cea7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Demos | LongFormer Acc | LongFormer F1 | Bert-base-chinese Acc | Bert-base-chinese F1
Gender | 0.875 | 0.904 | 0.926 | 0.958
Age | 0.902 | 0.873 | 0.925 | 0.920
Party | 0.849 | 0.846 | \ | \
Ideology | 0.810 | 0.807 | \ | \
Race | 0.779 | 0.768 | \ | \
Consumption | \ | \ | 0.749 | 0.748
Education | \ | \ | 0.954 | 0.975
", + "bbox": [ + 305, + 88, + 689, + 242 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 9: Performance of demographic classifiers on test set.", + "bbox": [ + 300, + 247, + 694, + 263 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "considered in prior distribution, only users from the sampled user pool are annotated by the majority votes of LLMs.", + "bbox": [ + 169, + 281, + 823, + 310 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0cb012c6c55c49fd65fa15745af750ea86382e3c5992b62bd56df63efb6706fc.jpg", + "image_caption": [ + "Figure 5: Demographic distribution on X and Rednote user pool." + ], + "image_footnote": [], + "bbox": [ + 178, + 323, + 488, + 518 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cb26ecd99c58640d8eb4effc287a4e8bee0c4214555edb59f3d149882199fb82.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 324, + 815, + 518 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C Demographic Distribution Sampling Details", + "text_level": 1, + "bbox": [ + 171, + 89, + 578, + 107 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.1 Iterative Proportional Fitting", + "text_level": 1, + "bbox": [ + 171, + 119, + 421, + 135 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In our study, we follow the classical IPF method to construct the joint distribution of all the attributes in our simulation. Specifically, we start with a two-way table with individual components denoted as $x_{ij}$ and targeted estimation $\\hat{x}_{ij}$ . The targeted estimation $\\hat{x}_{ij}$ satisfies $\\Sigma_j\\hat{x}_{ij} = v_i$ and $\\Sigma_i\\hat{x}_{ij} = w_j$ . The iterations are specified as follows:", + "bbox": [ + 169, + 145, + 826, + 202 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Let $\\hat{x}_{ij}^{(0)} = x_{ij}$ . For $\\alpha > 1$ :", + "bbox": [ + 169, + 210, + 354, + 232 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {x} _ {i j} ^ {(2 \\alpha - 1)} = \\frac {\\hat {x} _ {i j} ^ {(2 \\alpha - 2)} v _ {i}}{\\sum_ {k = 1} ^ {J} \\hat {x} _ {i j} ^ {(2 \\alpha - 2)}} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 239, + 825, + 282 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {x} _ {i j} ^ {(2 \\alpha)} = \\frac {\\hat {x} _ {i j} ^ {(2 \\alpha - 1)} w _ {j}}{\\Sigma_ {k = 1} ^ {I} \\hat {x} _ {i j} ^ {(2 \\alpha - 1)}} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 286, + 825, + 330 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The iterations end when the estimated marginals are sufficiently close to the real marginals or when they stabilize without further convergence.", + "bbox": [ + 169, + 339, + 823, + 368 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the presidential election simulation, we implement the IPF algorithm for each state using five attributes: gender, race, age group, ideology, and partisanship. In most cases, the algorithm does not converge, but the gaps between the estimated and actual marginals are less than $5\\%$ , with 888 out of 918 marginals falling within this range. For the outliers, since IPF adjusts proportionally to the marginals, the overall ratio of marginals remains consistent. 
We then use the estimated joint distribution and marginals for our massive simulation.", + "bbox": [ + 169, + 373, + 825, + 457 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.2 Identical Distribution Sampling", + "text_level": 1, + "bbox": [ + 171, + 472, + 439, + 488 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Identical distribution sampling, also known as direct sampling, is applied when the joint distribution of multiple demographics is available. Given feature $X$ and $Y$ , the joint distribution can be formulated as $p(X,Y)$ . Then, identical distribution sampling can be formulated as follows:", + "bbox": [ + 169, + 497, + 823, + 541 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\left(X _ {i}, Y _ {i}\\right) \\sim p (X, Y) \\quad i = 1, 2, \\dots , n \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 542, + 825, + 560 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For breaking news feedback simulations, as the ground truth set is directly from the Rednote, we can obtain all the users' demographics and calculate the joint distribution. Simultaneously, the scale of the user pool satisfies the direct sampling requirements.", + "bbox": [ + 169, + 570, + 826, + 613 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.3 Prior Distribution of National Economic Survey", + "text_level": 1, + "bbox": [ + 171, + 628, + 550, + 643 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the national economic survey distribution, only average income is available from the official data. As a result, we generate the prior income distribution at the regional level. The income distribution across different regions exhibits significant heterogeneity, often characterized by a right-skewed pattern. To model this distribution, we adopt a mixture distribution approach, combining a lognormal distribution for the majority of the population with a Pareto distribution for the high-income segment. This hybrid model captures both the bulk of wage earners and the long-tail effect observed in high-income groups.", + "bbox": [ + 169, + 652, + 826, + 751 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Formally, let $X$ denote an individual's wage. 
We assume that for the lower and middle-income groups $(X < x_{\\min})$ , incomes follow a log-normal distribution:", + "bbox": [ + 169, + 756, + 823, + 786 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nX \\sim \\log \\text {N o r m a l} (\\mu , \\sigma^ {2}) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 797, + 825, + 816 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 171, + 819, + 217, + 830 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mu = \\ln \\left(\\frac {\\mu_ {\\text {a c t u a l}} ^ {2}}{\\sqrt {\\sigma_ {\\text {a c t u a l}} ^ {2} + \\mu_ {\\text {a c t u a l}} ^ {2}}}\\right), \\quad \\sigma = \\sqrt {\\ln \\left(1 + \\frac {\\sigma_ {\\text {a c t u a l}} ^ {2}}{\\mu_ {\\text {a c t u a l}} ^ {2}}\\right)} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 830, + 825, + 867 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the high-income group $(X\\geq x_{min})$ , wages follow a Pareto distribution:", + "bbox": [ + 169, + 876, + 674, + 892 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nP (X \\geq x) = C x ^ {- \\alpha}, \\quad x \\geq x _ {\\min } \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 896, + 825, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where $\\alpha$ is the Pareto shape parameter determining the income concentration at the top. The proportion of individuals assigned to each distribution is governed by an empirical threshold ratio, typically set such that $90\\%$ of the population follows the log-normal distribution while $10\\%$ follows the Pareto distribution. This mixture approach provides a flexible yet robust framework for simulating realistic income distributions across diverse economic conditions. We set all the parameters empirically according to previous research and generate the income distribution for 31 regions in China (Hong Kong, Macao, and Taiwan are excluded).", + "bbox": [ + 174, + 92, + 823, + 186 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D Questionnaire Design Details", + "text_level": 1, + "bbox": [ + 171, + 89, + 455, + 108 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We provide the questionnaires here for all three simulations.", + "bbox": [ + 171, + 119, + 568, + 136 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D.1 Questionnaire for Presidential Election Prediction", + "text_level": 1, + "bbox": [ + 171, + 151, + 565, + 166 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/a66412febde24e12e177a5c22dc9307635f04919238d13f32d8145fd1596adf3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Q01Voting Behavior
QuestionORDER OF MAJOR PARTY CANDIDATE NAMES
Value Labels1. Democrat first / Republican second2. Republican first / Democrat second
Q02Social Security
QuestionNext I am going to read you a list of federal programs. For each one, I would like you to tell me whether you would like to see spending increased, decreased, or kept the same.What about Social Security? Should federal spending on Social Security be increased, decreased, or kept the same?
Value Labels-2. DK/RF1. Increased2. Decreased3. Kept the same
Q03Education
QuestionWhat about public schools? Should federal spending on public schools be increased, decreased, or kept the same?
Value Labels-2. DK/RF1. Increased2. Decreased3. Kept the same
Q04Immigration
QuestionWhat about tightening border security to prevent illegal immigration? Should federal spending on tightening border security to prevent illegal immigration be increased, decreased, or kept the same?
Value Labels-2. DK/RF1. Increased2. Decreased3. Kept the same
Q05Criminal Justice
QuestionWhat about dealing with crime? Should federal spending on dealing with crime be increased, decreased, or kept the same?
Value Labels-2. DK/RF1. Increased2. Decreased3. Kept the same
Q06Social Welfare
QuestionWhat about welfare programs? Should federal spending on welfare programs be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q07Infrastructure
QuestionWhat about building and repairing highways? Should federal spending on building and repairing highways be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q08Aid to Poor
QuestionWhat about aid to the poor? Should federal spending on aid to the poor be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q09Environment
QuestionWhat about protecting the environment? Should federal spending on protecting the environment be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q10Government
QuestionHow much do you feel that having elections makes the government pay attention to what the people think?
Value Labels-2. DK/RF\n1. A good deal\n2. Some\n3. Not much
Q11Economy
QuestionWhich party do you think would do a better job of handling the nation's economy?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q12Health Care
QuestionWhich party do you think would do a better job of handling health care?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q13Immigration
QuestionWhich party do you think would do a better job of handling immigration?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q14Taxes
QuestionWhich party do you think would do a better job of handling taxes?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q15Environment
QuestionWhich party do you think would do a better job of handling the environment?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q16Education
QuestionSome people think the government should provide fewer services even in areas such as health and education in order to reduce spending.\nOther people feel it is important for the government to provide many more services even if it means an increase in spending.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should provide fewer services\n2. Neutral\n3. Government should provide more services
Q17Defense
QuestionSome people believe that we should spend less money for defense.\nOthers feel that defense spending should be increased.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Decrease defense spending\n2. Neutral\n3. Increase defense spending
Q18Health Care
QuestionThere is much concern about the rapid rise in medical and hospital costs.\nSome people feel there should be a government insurance plan which would cover all medical and hospital expenses for everyone.\nOthers feel that all medical expenses should be paid by individuals through private insurance plans like Blue Cross or other company paid plans.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government insurance plan\n2. Neutral\n3. Private insurance plan
Q19Social Welfare
QuestionSome people feel the government in Washington should see to it that every person has a job and a good standard of living.\nOthers think the government should just let each person get ahead on their own.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should see to jobs and standard of living\n2. Neutral\n3. Government should let each person get ahead on own
Q20Aid to Blacks
QuestionSome people feel that the government in Washington should make every effort to improve the social and economic position of blacks.\nOthers feel that the government should not make any special effort to help blacks because they should help themselves.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should help blacks\n2. Neutral\n3. Blacks should help themselves
Q21Environment
QuestionSome people think we need much tougher government regulations on business in order to protect the environment.\nOthers think that current regulations to protect the environment are already too much of a burden on business.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Tougher regulations on business needed to protect environment\n2. Neutral\n3. Regulations to protect environment already too much a burden on business
Q22Abortion
QuestionWould you be pleased, upset, or neither pleased nor upset if the Supreme Court reduced abortion rights?
Value Labels-2. DK/RF\n1. Pleased\n2. Upset\n3. Neither pleased nor upset
Q23Criminal Justice
QuestionDo you favor or oppose the death penalty for persons convicted of murder?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose
Q24US Position in World
QuestionDo you agree or disagree with this statement: ‘This country would be better off if we just stayed home and did not concern ourselves with problems in other parts of the world.’
Value Labels-2. DK/RF\n1. Agree\n2. Disagree
Q25US Position in World
QuestionHow willing should the United States be to use military force to solve international problems?
Value Labels-2. DK/RF\n1. Willing\n2. Moderately willing\n3. Not willing
Q26Inequality
QuestionDo you think the difference in incomes between rich people and poor people in the United States today is larger, smaller, or about the same as it was 20 years ago?
Value Labels-2. DK/RF\n1. Larger\n2. Smaller\n3. About the same
Q27Environment
QuestionDo you think the federal government should be doing more about rising temperatures, should be doing less, or is it currently doing the right amount?
Value Labels-2. DK/RF\n1. Should be doing more\n2. Should be doing less\n3. Is currently doing the right amount
Q28Parental Leave
QuestionDo you favor, oppose, or neither favor nor oppose requiring employers to offer paid leave to parents of new children?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q29LGBTQ+ Rights
QuestionDo you think business owners who provide wedding-related services should be allowed to refuse services to same-sex couples if same-sex marriage violates their religious beliefs, or do you think business owners should be required to provide services regardless of a couple's sexual orientation?
Value Labels-2. DK/RF\n1. Should be allowed to refuse\n2. Should be required to provide services
Q30LGBTQ+ Rights
QuestionShould transgender people - that is, people who identify themselves as the sex or gender different from the one they were born as - have to use the bathrooms of the gender they were born as, or should they be allowed to use the bathrooms of their identified gender?
Value Labels-2. DK/RF\n1. Have to use the bathrooms of the gender they were born as\n2. Be allowed to use the bathrooms of their identified gender
Q31LGBTQ+ Rights
QuestionDo you favor or oppose laws to protect gays and lesbians against job discrimination?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose
Q32LGBTQ+ Rights
QuestionDo you think gay or lesbian couples should be legally permitted to adopt children?
Value Labels-2. DK/RF\n1. Yes\n2. No
Q33LGBTQ+ Rights
QuestionWhich comes closest to your view? You can just tell me the number of your choice.
Value Labels-2. DK/RF\n1. Gay and lesbian couples should be allowed to legally marry\n2. Gay and lesbian couples should be allowed to form civil unions but not legally marry\n3. There should be no legal recognition of gay or lesbian couples' relationship
Q34Immigration
QuestionSome people have proposed that the U.S. Constitution should be changed so that the children of unauthorized immigrants do not automatically get citizenship if they are born in this country.\nDo you favor, oppose, or neither favor nor oppose this proposal?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q35Immigration
QuestionWhat should happen to immigrants who were brought to the U.S. illegally as children and have lived here for at least 10 years and graduated high school here? Should they be sent back where they came from, or should they be allowed to live and work in the United States?
Value Labels-2. DK/RF\n1. Should be sent back where they came from\n2. Should be allowed to live and work in the US
Q36Immigration
QuestionDo you favor, oppose, or neither favor nor oppose building a wall on the U.S. border with Mexico?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q37Unrest
QuestionDuring the past few months, would you say that most of the actions taken by protestors to get the things they want have been violent, or have most of these actions by protestors been peaceful, or have these actions been equally violent and peaceful?
Value Labels-2. DK/RF\n1. Mostly violent\n2. Mostly peaceful\n3. Equally violent and peaceful
Q38Government
QuestionDo you think it is better when one party controls both the presidency and Congress, better when control is split between the Democrats and Republicans, or doesn’t it matter?
Value Labels-2. DK/RF\n1. Better when one party controls both\n2. Better when control is split\n3. It doesn’t matter
Q39Government
QuestionWould you say the government is pretty much run by a few big interests looking out for themselves or that it is run for the benefit of all the people?
Value Labels-2. DK/RF\n1. Run by a few big interests\n2. For the benefit of all the people
Q40Government
QuestionDo you think that people in government waste a lot of the money we pay in taxes, waste some of it, or don’t waste very much of it?
Value Labels-2. DK/RF\n1. Waste a lot\n2. Waste some\n3. Don’t waste very much
Q41Election Integrity
QuestionDo you favor, oppose, or neither favor nor oppose allowing convicted felons to vote once they complete their sentence?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q42Democratic Norms
QuestionHow important is it that news organizations are free to criticize political leaders?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q43Democratic Norms
QuestionHow important is it that the executive, legislative, and judicial branches of government keep one another from having too much power?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q44Democratic Norms
QuestionHow important is it that elected officials face serious consequences if they engage in misconduct?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q45Democratic Norms
QuestionHow important is it that people agree on basic facts even if they disagree politically?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q46Democratic Norms
QuestionWould it be helpful, harmful, or neither helpful nor harmful if U.S. presidents could work on the country’s problems without paying attention to what Congress and the courts say?
Value Labels-2. DK/RF\n1. Helpful\n2. Harmful\n3. Neither helpful nor harmful
Q47Democratic Norms
QuestionDo you favor, oppose, or neither favor nor oppose elected officials restricting journalists’ access to information about government decision-making?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q48Gender Resentment
Question‘Many women interpret innocent remarks or acts as being sexist.’\nDo you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error\n1. Agree\n2. Neither agree nor disagree\n3. Disagree
Q49Gender Resentment
Question‘Women seek to gain power by getting control over men.’\nDo you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error\n1. Agree\n2. Neither agree nor disagree\n3. Disagree
", + "bbox": [ + 173, + 183, + 857, + 900 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 87, + 856, + 859 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 89, + 856, + 898 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 89, + 856, + 901 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 89, + 856, + 898 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 87, + 856, + 858 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 87, + 857, + 872 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 89, + 857, + 378 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "D.2 Questionnaire for Breaking News Feedback", + "text_level": 1, + "bbox": [ + 171, + 401, + 519, + 416 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/7ce59a581d9686e1bdc3056be8b7786b1ee7c8b56671e62f94b9629946ea1c5e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Q01Public Cognition (PC)
QuestionI have heard of ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q02Public Cognition (PC)
QuestionMany people around me use ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q03Public Cognition (PC)
QuestionI have a deep understanding of ChatGPT's functions and applications.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q04Perceived Risks (PR)
QuestionChatGPT may lead to the widespread dissemination of false information.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q05Perceived Risks (PR)
QuestionChatGPT may reduce human thinking ability and creativity.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q06Perceived Risks (PR)
QuestionThe development of ChatGPT may replace certain jobs, and I am deeply concerned about this.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q07Perceived Benefits (PB)
QuestionChatGPT will definitely improve my work and study efficiency.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q08Perceived Benefits (PB)
QuestionChatGPT helps broaden my knowledge and provides me with new perspectives and ideas.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q09Perceived Benefits (PB)
QuestionChatGPT promotes technological innovation and development in related fields.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q10Trust (TR)
QuestionI fully trust the team developing ChatGPT to manage and guide its development responsibly.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q11Trust (TR)
QuestionI have strong confidence in the accuracy and reliability of the information generated by ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q12Trust (TR)
QuestionI believe that the future application of ChatGPT will be effectively regulated.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q13Fairness (FA)
QuestionThe opportunities to use ChatGPT are distributed fairly among different groups of people.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q14Fairness (FA)
QuestionI find the distribution of benefits brought by ChatGPT to be fair.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q15Fairness (FA)
QuestionI believe that the decision-making process for the development and promotion of ChatGPT is fully transparent and adequately reflects public interests.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q16Public Acceptance (PA)
QuestionOverall, I strongly welcome the emergence of ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q17Public Acceptance (PA)
QuestionI am definitely willing to use ChatGPT in my work or studies.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q18Public Acceptance (PA)
QuestionI strongly support increased investment in the research and development of AI technologies like ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
", + "bbox": [ + 173, + 433, + 857, + 910 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 171, + 87, + 856, + 843 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 171, + 87, + 857, + 893 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 88, + 854, + 199 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "D.3 Questionnaire for National Economic Survey", + "text_level": 1, + "bbox": [ + 171, + 218, + 529, + 234 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/18070cfafc0e2c3f8693e46f9ae0e339394f7c42e0f7d13109ea8126d6c4d3ff.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Q01Food
QuestionWhat is your average monthly expenditure on food (including dining out)? (Unit: CNY)
Value LabelsA. Below 500 CNY\nB. 501-650 CNY\nC. 651-800 CNY\nD. 801-1000 CNY\nE. Above 1000 CNY
Q02Food
QuestionDo you think your current spending on food, tobacco, and alcohol is too high relative to your income?
Value LabelsA. Yes\nB. No\nC. Acceptable
Q03Clothing
QuestionWhat is your average monthly expenditure on clothing (including apparel, shoes, and accessories)? (Unit: CNY)
Value LabelsA. Below 50 CNY\nB. 51-100 CNY\nC. 101-150 CNY\nD. 151-200 CNY\nE. Above 200 CNY
Q04Clothing
QuestionHow much economic pressure do you feel from clothing expenses?
Value LabelsA. Very low, almost no pressure\nB. Moderate, some pressure but manageable\nC. High, requires careful spending\nD. Very high, affects spending in other areas
Q05Household
QuestionWhat is your average monthly housing expenditure? (Including rent, mortgage, property fees, maintenance, etc.) (Unit: CNY)
Value LabelsA. Below 200 CNY\nB. 201-500 CNY\nC. 501-800 CNY\nD. 801-1200 CNY\nE. Above 1200 CNY
Q06Household
QuestionWhat percentage of your monthly income is spent on housing? (Including rent, mortgage, property fees, maintenance, etc.)
Value LabelsA. Below 10% \nB. 10%-20% \nC. 21%-30% \nD. 31%-40% \nE. Above 40%
Q07Daily Service
QuestionWhat is your average monthly expenditure on daily necessities (personal care, household items, cleaning supplies, etc.) and services (housekeeping, repairs, beauty, pet services, etc.)? (Unit: CNY)
Value LabelsA. Below 80 CNY \nB. 81-120 CNY \nC. 121-160 CNY \nD. 161-200 CNY \nE. Above 200 CNY
Q08Transportation & Communication
QuestionWhat is your average monthly expenditure on transportation (public transport, taxis, fuel, parking, etc.) and communication (mobile and internet fees)? (Unit: CNY)
Value LabelsA. Below 200 CNY \nB. 201-300 CNY \nC. 301-400 CNY \nD. 401-500 CNY \nE. Above 500 CNY
Q09Education & Entertainment
QuestionWhat is your average monthly expenditure on education (tuition, training, books, etc.) and cultural entertainment (movies, performances, games, fitness, cultural activities, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY \nB. 101-200 CNY \nC. 201-300 CNY \nD. 301-400 CNY \nE. Above 400 CNY
Q10Education & Entertainment
QuestionCan you easily afford your current education, cultural, and entertainment expenses?
Value LabelsA. Yes, spending does not affect other areas \nB. Barely, needs some control \nC. Not really, affects other expenditures \nD. No, it creates significant financial pressure
Q11Medical
QuestionWhat is your average monthly expenditure on healthcare (medications, medical services, health management, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY \nB. 101-200 CNY \nC. 201-300 CNY \nD. 301-400 CNY \nE. Above 400 CNY
Q12Medical
QuestionHave you purchased private medical or health insurance for yourself or your family?
Value LabelsA. Yes \nB. Not yet, but planning to \nC. No, and no plans to
Q13Others
QuestionBesides food, clothing, housing, daily necessities and services, transportation, education, culture, and healthcare, what is your average monthly expenditure on other areas (e.g., hobbies, charitable donations, investment, etc.)? (Unit: CNY)
Value LabelsA. Below 30 CNY\nB. 31-60 CNY\nC. 61-90 CNY\nD. 91-120 CNY\nE. Above 120 CNY
Q14Overall
QuestionHow would you evaluate the impact of your current consumption level on your household (or personal) financial situation?
Value LabelsA. Comfortable, can moderately increase spending\nB. Average, can maintain current spending\nC. Tight, need to control or reduce spending\nD. Very tight, affects quality of life
Q15Overall
QuestionDo you feel that your consumption pressure is too high relative to your income level?
Value LabelsA. Yes\nB. No\nC. Not sure
Q16Overall
QuestionIf your income increases, which consumption areas would you most like to expand or improve? (Multiple choices allowed)
Value LabelsA. Food and alcohol\nB. Clothing\nC. Housing\nD. Daily necessities and services\nE. Transportation and communication\nF. Education, culture, and entertainment\nG. Healthcare\nH. Other goods and services
Q17Overall
QuestionWhat is your consumption expectation for the next six months to a year?
Value LabelsA. Will continue to increase\nB. Will remain roughly the same\nC. Will moderately decrease\nD. Uncertain
", + "bbox": [ + 173, + 250, + 857, + 887 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 89, + 856, + 905 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "", + "table_caption": [], + "table_footnote": [], + "bbox": [ + 173, + 87, + 856, + 678 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_model.json b/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2b89436ae459359bf30c0f66e9505a522e7da852 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_model.json @@ -0,0 +1,3289 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.283, + 0.058, + 0.717 + ], + "angle": 270, + "content": "arXiv:2504.10157v3 [cs.CL] 15 Jul 2025" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.123, + 0.803, + 0.196 + ], + "angle": 0, + "content": "SocioVerse: A World Model for Social Simulation Powered by LLM Agents and A Pool of 10 Million Real-World Users" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.245, + 0.816, + 0.307 + ], + "angle": 0, + "content": "Xinnong Zhang\\(^{1,2\\dagger}\\), Jiayu Lin\\(^{1,2\\dagger}\\), Xinyi Mou\\(^{2\\dagger}\\), Shiyue Yang\\(^{2}\\), Xiawei Liu\\(^{2}\\), Libo Sun\\(^{2}\\), Hanjia Lyu\\(^{3}\\), Yihang Yang\\(^{2}\\), Weihong Qi\\(^{4}\\), Yue Chen\\(^{2}\\), Guanying Li\\(^{2}\\), Ling Yan\\(^{5}\\), Yao Hu\\(^{5}\\), Siming Chen\\(^{2}\\), Yu Wang\\(^{2}\\), Xuanjing Huang\\(^{2}\\), Jiebo Luo\\(^{3}\\), Shiping Tang\\(^{2}\\), Libo Wu\\(^{1,2}\\), Baohua Zhou\\(^{2}\\), Zhongyu Wei\\(^{1,2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.283, + 0.306, + 0.714, + 0.363 + ], + "angle": 0, + "content": "\\(^{1}\\)Shanghai Innovation Institute, \\(^{2}\\)Fudan University, \\(^{3}\\)University of Rochester, \\(^{4}\\)Indiana University, \\(^{5}\\)Xiaohongshu Inc. zywei@fudan.edu.cn \nSocioVerse: https://github.com/FudanDISC/SocioVerse" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.379, + 0.803, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.631, + 0.825, + 0.66 + ], + "angle": 0, + "content": "Figure 1: An illustration of the SocioVerse in the case of Ukraine issue. The alignment challenges are well handled regarding environment, user, scenario, and behavior." + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.675, + 0.538, + 0.691 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.705, + 0.769, + 0.887 + ], + "angle": 0, + "content": "Social simulation is transforming traditional social science research by modeling human behavior through interactions between virtual individuals and their environments. With recent advances in large language models (LLMs), this approach has shown growing potential in capturing individual differences and predicting group behaviors. 
However, existing methods face alignment challenges related to the environment, target users, interaction mechanisms, and behavioral patterns. To this end, we introduce SocioVerse, an LLM-agent-driven world model for social simulation. Our framework features four powerful alignment components and a user pool of 10 million real individuals. To validate its effectiveness, we conducted large-scale simulation experiments across three distinct domains: politics, news, and economics. Results demonstrate that SocioVerse can reflect large-scale population dynamics while ensuring diversity, credibility, and representativeness through standardized procedures and minimal manual adjustments." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.897, + 0.472, + 0.912 + ], + "angle": 0, + "content": "These authors contribute equally to this work." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.313, + 0.106 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.827, + 0.203 + ], + "angle": 0, + "content": "The study of human behavior aims to understand how individuals and groups act in various social contexts and serves as a cornerstone of social science research. Traditionally, this has been accomplished using methods such as surveys, interviews, and observations [10, 18, 44]. However, these approaches often encounter challenges, including high costs, limited sample sizes, and ethical concerns. As a result, researchers have resorted to alternative methods for studying human behavior." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.208, + 0.825, + 0.32 + ], + "angle": 0, + "content": "Social simulation has emerged as an effective method for addressing this issue, where researchers use agents to model human behavior, observe their reactions, and translate these findings into insights about human behavior [48, 50]. By assigning behavioral rules to autonomous agents, researchers can explore how micro-level decisions lead to emergent macro-level patterns through the agent-based models [11, 21]. This approach enables capturing specific groups' preferences on particular topics and forecasting potential social dynamics. Furthermore, recent advancements in large language models (LLMs) have significantly enhanced agents' reasoning and decision-making capabilities, enabling them to operate and interact within increasingly realistic and complex environments [3, 35, 37]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.326, + 0.827, + 0.396 + ], + "angle": 0, + "content": "Recent studies have explored social simulation across various levels and scenarios, from mimicking well-known individuals and mirroring specific situations to modeling large-scale social dynamics [4, 29, 34, 36, 49, 60]. However, they share a common challenge: alignment between the simulated environment and the real world, which manifests across multiple dimensions and raises several key questions that remain to be addressed, as shown in Figure 1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.402, + 0.63, + 0.415 + ], + "angle": 0, + "content": "Q1. How to align the simulated environment with the real world?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.416, + 0.827, + 0.513 + ], + "angle": 0, + "content": "In the real world, new events occur every day and new content is continuously generated. The behavior of real users is rooted in these ever-evolving social contexts and policy agendas. 
However, the static knowledge of LLMs prevents them from aligning with the dynamic nature of the real-world social environment [2, 15]. There is a gap between the simulated context and the real world, which results in discrepancies between the simulation process and outcomes compared to those in reality. Therefore, it is necessary to establish an update mechanism to keep the simulated environment synchronized with the real world." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.519, + 0.611, + 0.533 + ], + "angle": 0, + "content": "Q2. How to align simulated agents with target users precisely?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.825, + 0.616 + ], + "angle": 0, + "content": "The composition of users in the real world is both complex and diverse, making it impractical to enumerate all users in every scenario. Therefore, it is essential to identify target users whose distribution aligns with that of the users in the corresponding scenario, thereby accurately reflecting the real-world composition and relationships [17, 45]. Based on this, precise target user simulation also requires providing agents with a detailed and comprehensive description of the corresponding users, often involving the integration of high-fidelity demographic, contextual, and behavioral data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.622, + 0.812, + 0.636 + ], + "angle": 0, + "content": "Q3. How to align the interaction mechanism with the real world among different scenarios?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.636, + 0.827, + 0.733 + ], + "angle": 0, + "content": "The diversity of social interactions presents challenges in social simulation design, requiring deliberate choices regarding the number of individuals, social structures, interaction patterns, and message dissemination mechanisms, to align with the real world. This often results in independently constructed task-specific simulation pipelines performing repetitive work, which reduces their generalizability and scalability [26, 58]. Therefore, there is a need for unified simulation frameworks based on systematic categorization to standardize simulation components and facilitate extensibility across different social scenarios." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.739, + 0.652, + 0.754 + ], + "angle": 0, + "content": "Q4. How to align the behavioral pattern with the real-world groups?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.826, + 0.837 + ], + "angle": 0, + "content": "When the environment perceived by agents, the user composition, and the interaction mechanisms are aligned with the real world, agents are expected to exhibit responses consistent with those of the corresponding real users. However, current LLMs exhibit inherent bias and limitations in such reasoning, failing to infer different types of user behaviors [16, 60]. Therefore, it is necessary to systematically collect behavior-driving factors across different user characteristics and adopt appropriate modeling approaches to effectively capture diverse behavior patterns." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.826, + 0.913 + ], + "angle": 0, + "content": "In this paper, we propose SocioVerse, a world model for social simulation driven by LLM-based agents based on a large-scale real-world user pool. As shown in Figure 2, we design modular components to address the above questions. The Social Environment injects up-to-date and external real-world information into the simulation. 
The User Engine and Scenario Engine respectively reconstruct realistic user context and orchestrate the simulation process to align the simulation with" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.184, + 0.098, + 0.809, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.412, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Figure 2: An illustration of SocioVerse framework involving 4 powerful parts. The social environment provides an updated context for the simulation. During the simulation, the behavior engine takes the simulation setting, user profiles, and social information from the scenario engine, user engine, and social environment, respectively, and generates the results according to the query." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.825, + 0.54 + ], + "angle": 0, + "content": "the real world. Given this rich contextual setup, the Behavior Engine then drives agents to reproduce human behaviors accordingly." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.544, + 0.825, + 0.616 + ], + "angle": 0, + "content": "To support the framework, we construct a user pool of 10 million individuals by collecting real-world social media data to power the user engine. Comparable in scale to the entire populations of Hungary or Greece, this extensive pool enables diverse and large-scale social simulations. For any customized simulation task, various sampling strategies can be applied to extract target user groups from the pool to support the simulation process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.62, + 0.827, + 0.704 + ], + "angle": 0, + "content": "We conduct three simulations using the SocioVerse framework, each differing in research domain, user composition, and social environment: (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey. For each task, we compare the simulation results with real-world situations. Extensive and comprehensive experiments demonstrate that our framework serves as a robust foundation for building standardized and accurate large-scale social simulations. In summary, our key contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.728, + 0.825, + 0.77 + ], + "angle": 0, + "content": "- SocioVerse: We propose a world model for social simulation comprising four powerful alignment modules, enabling diverse and trustworthy social simulations (as illustrated in Figure 2)." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.788, + 0.827, + 0.831 + ], + "angle": 0, + "content": "- 10M User Pool: A user pool of 10 million individuals, constructed from real-world behavioral data, enables large-scale and diverse social simulations, ranging from small interest groups to large citizen communities." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.849, + 0.825, + 0.892 + ], + "angle": 0, + "content": "- Three Illustrative Simulations: We demonstrate the framework's capabilities through three distinct scenarios: presidential election prediction, breaking news feedback, and a national economic survey, providing a foundation for future research." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.728, + 0.827, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.28, + 0.106 + ], + "angle": 0, + "content": "2 Methods" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.122, + 0.317, + 0.136 + ], + "angle": 0, + "content": "Overall Framework" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.149, + 0.828, + 0.22 + ], + "angle": 0, + "content": "The SocioVerse framework follows a structured pipeline to achieve realistic social simulation results, as shown in Figure 2: (1) Social Environment collects updated information and contextual knowledge. Within the simulation environment, (2) User Engine aligns the simulated agents with target users, (3) Scenario Engine aligns the interaction structure with diverse scenarios, and (4) Behavior Engine aligns the behavioral pattern with real-world target groups." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.238, + 0.352, + 0.251 + ], + "angle": 0, + "content": "2.1 Social Environment" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.827, + 0.307 + ], + "angle": 0, + "content": "Function The social environment provides event-related context to align the simulation environment with real-world conditions. By integrating up-to-date events, social statistics, and preference content into LLM-based agents, it enhances the realism of the simulation and improve agent decision-making." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.323, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Components The social environment should encompass as much real-world social, cultural, and technological context as possible. It can be broadly categorized into three types: social structural information, social dynamic information, and personalized context." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.826, + 0.456 + ], + "angle": 0, + "content": "Social Structure: Social structural information provides agents with a rich knowledge base encompassing demographic distributions, cultural norms, urban infrastructures, and collective behavior patterns [57]. This data allows agents to behave in a way that aligns with the typical characteristics of their assigned demographic or geographic profile. For example, by incorporating regional dialect preferences, work-life habits, and common social values, the simulation can more accurately reflect public discourse trends, mobility behaviors, and economic interactions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.461, + 0.825, + 0.532 + ], + "angle": 0, + "content": "Social Dynamics: Social dynamics encompass time-sensitive content continuously generated in the real world, such as news events and policy changes. Typically, this engine maintains an up-to-date event base to continuously collect real-world event news from mainstream news, and all the news articles contain time stamps and event-related tags so that LLM-based agents can comb through the timeline of the events and react accordingly [37]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.537, + 0.826, + 0.607 + ], + "angle": 0, + "content": "Personalized Context: In addition to the macro social environment, individuals also receive different personalized information feeds. Previous studies have explored that the recommendation system can enhance the behavior diversity of the agent [31, 56, 60]. 
Consequently, the preference content component constructs relevant posts and pushes them to agents according to their social interaction network and interesting topics." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.625, + 0.3, + 0.64 + ], + "angle": 0, + "content": "2.2 User Engine" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.651, + 0.827, + 0.681 + ], + "angle": 0, + "content": "Function The user engine aligns simulated agents with a rich set of real-world user samples, enabling the creation of complex target users within the simulation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.825, + 0.725 + ], + "angle": 0, + "content": "Components To support diverse user composition and effective user retrieval and description, the user engine incorporates a large user pool and a wide range of user labels." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.827, + 0.87 + ], + "angle": 0, + "content": "User Pools: The user pool is designed to collect extensive digital footprints of individuals across social media platforms, enabling a more comprehensive characterization of real-world behavioral patterns and expression tendencies. To this end, we constructed a user pool covering a variety of social media platforms, including \\(\\mathbf{X}^1\\) and Rednote2. Anomalous data, such as advertising and bot-generated content, is filtered by calculating the post frequency and average text similarity. The detailed procedure can be found in Appendix A. We index users and construct a user pool of 10 million users based on the collected social media posts. Formally, we define user pool as: \\(UserPool = \\{U_i, P_i \\mid i \\in \\mathbb{S}\\}\\), where the \\(i\\)-th user \\(U_i\\) derives from the collection of social media platforms \\(\\mathbb{S}\\) with his/her related posts \\(P_i = \\{P_{i,1}, P_{i,2}, \\ldots\\}\\). The statistical summary of the user pool is provided in Table 1." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.884, + 0.312, + 0.898 + ], + "angle": 0, + "content": "1https://x.com/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.898, + 0.419, + 0.912 + ], + "angle": 0, + "content": "\\(^{2}\\)https://www.xiaohongshu.com/" + }, + { + "type": "list", + "bbox": [ + 0.193, + 0.884, + 0.419, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.37, + 0.089, + 0.626, + 0.153 + ], + "angle": 0, + "content": "
Source# Users# Posts
X1,006,51730,195,510
Rednote9,158,40440,963,735
" + }, + { + "type": "table_caption", + "bbox": [ + 0.33, + 0.158, + 0.666, + 0.173 + ], + "angle": 0, + "content": "Table 1: Statistical summary of the 10M user pool." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.192, + 0.828, + 0.358 + ], + "angle": 0, + "content": "User Labels: User labels refer to the tagging and description of users, which can be represented using discrete attributes or continuous representation. Demographic descriptions of users are the most commonly used form of labeling. However, they are often not directly accessible. Therefore, we designed a demographic annotation system to infer and label user attributes. The process begins with multiple LLMs serving as initial annotators, classifying users across various demographic dimensions. Human annotators then evaluate and refine the LLM-generated labels, ensuring the reliability of the user tags dataset. The curated dataset is subsequently used to train demographic classifiers, enabling large-scale annotation in a cost-effective manner. Specifically, we annotate users across 15 demographic dimensions: age, gender, vocation, race, income, education, settlement type, region, employment, marital status, religious, party, ideology, BigFive personality, and hobbies. Each attribute is inferred by a specialized classifier trained on the corresponding subset of the user tags dataset. See Appendix B for further details." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.374, + 0.329, + 0.388 + ], + "angle": 0, + "content": "2.3 Scenario Engine" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.442 + ], + "angle": 0, + "content": "Function The scenario engine aligns various simulation structures with real-world contexts based on specific task formulations and scenario types, and then scales individual simulations by sampling according to demographic distributions provided by the user engine." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.456, + 0.826, + 0.498 + ], + "angle": 0, + "content": "Components The scenario engine formulates a wide range of real-world social situations, which can be summarized as archetypal scenario templates, including questionnaires, in-depth interviews, behavior experiments, and social media interaction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.825, + 0.547 + ], + "angle": 0, + "content": "Questionnaire: The questionnaire scenario constructs the simulation in a 1-to-N manner, with one designed scale or questionnaire answered by multiple target users in a single round. This scenario is suitable for massive social investigation on specific topics, like election polls." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.826, + 0.636 + ], + "angle": 0, + "content": "Inddepth Interview: The in-depth interview scenario follows a 1-to-1 structure, where a simulated interviewer engages with an individual target user through multiple interaction rounds [43]. This iterative process allows for probing deeper into responses, clarifying ambiguities, and exploring underlying motivations. Such simulations are particularly useful for qualitative research on user experiences, psychological assessments, and exploratory studies where nuanced responses and detailed reasoning are essential." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.642, + 0.826, + 0.726 + ], + "angle": 0, + "content": "Behavior Experiment: The behavior experiment scenario is typically conducted in a 1-to-N or N-to-N format, depending on whether individual or group interactions are being studied [8, 42]. Simulated users are exposed to controlled conditions where their behavioral responses are observed across multiple rounds of interaction. These simulations help researchers examine decision-making processes, social influences, and cognitive biases in various experimental setups, such as consumer behavior studies or cooperative game simulations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.731, + 0.826, + 0.815 + ], + "angle": 0, + "content": "Social Media Interaction: The social media interaction scenario adopts an N-to-N structure, where multiple simulated users engage in dynamic, multi-round exchanges in an online setting [30]. This scenario captures real-time interactions, including content sharing, comment threads, and viral spread dynamics, allowing researchers to analyze public discourse, opinion shifts, and information diffusion on social platforms. It is particularly valuable for studying trends in misinformation, political discussions, and network-based influence propagation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.831, + 0.33, + 0.845 + ], + "angle": 0, + "content": "2.4 Behavior Engine" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.911 + ], + "angle": 0, + "content": "Function The behavior engine aims to align the behaviors of the agents with that of real users. The behavior engine integrates user history and experience from the user engine, the interaction mechanism from the scenario engine and social context from the social environment to predict the behavior of each individual." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "Components To achieve credible behavior simulation, the behavior engine needs to provide a robust simulation foundation, including traditional agent-based models and a series of LLMs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Traditional Agent-Based Modeling: Traditional agent-based modeling (ABM) relies on rule-based and mathematical models [9, 23, 32, 47, 52], where interactions among agents are typically realized through the broadcasting of predefined values. These values are derived from heuristic functions or theoretical mathematical formulations. Traditional ABM approaches are highly scalable and computationally efficient, making them well-suited for simulating large populations, especially marginal users with relatively limited influence." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.827, + 0.327 + ], + "angle": 0, + "content": "LLM-powered Agents: LLMs leverage their role-playing capabilities to simulate user-generated content, and the abilities can be activated through various methods [29, 36, 51, 61-64]. Specifically, the behavior engine can be powered by general LLMs, expert LLMs, and domain-specific LLMs. Through non-parametric prompting, powerful general LLMs (e.g., GPT series and Qwen series) can act in accordance with predefined user profiles. 
Expert and domain-specific LLMs are acquired through parametric training, e.g., continual pretraining, supervised fine-tuning, and reinforcement learning. When target users exhibit complex profiles and the simulation requires deep domain expertise, these models are leveraged to enhance the professionalism and accuracy of agent behaviors." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.348, + 0.516, + 0.365 + ], + "angle": 0, + "content": "3 Implementation of Specific Scenarios" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.379, + 0.825, + 0.45 + ], + "angle": 0, + "content": "We implement three representative social simulation scenarios through the SocioVerse framework based on the implemented components: (a) presidential election prediction of America, (b) breaking news feedback analysis, and (c) national economic survey of China. These scenarios respectively address political communication, journalistic dissemination, and socioeconomic domains, demonstrating the framework's generalizability through standardized implementation pipelines." + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.464, + 0.391, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.403, + 0.464, + 0.608, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.464, + 0.818, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.59, + 0.825, + 0.619 + ], + "angle": 0, + "content": "Figure 3: Illustration of three scenarios representing (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.646, + 0.515, + 0.66 + ], + "angle": 0, + "content": "3.1 Presidential Election Prediction of America" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.672, + 0.825, + 0.755 + ], + "angle": 0, + "content": "Task Description Presidential elections remain central to public engagement and party strategy formation [6, 46]. This study analyzes methods for large-scale election simulation using LLMs through the U.S. presidential system's Electoral College framework. In this indirect voting system, citizens vote for state electors (allocated by congressional representation) who formally elect the president. Most states employ a winner-takes-all allocation of electoral votes to the statewide majority winner, with our modeling focused on predicting these state-level outcomes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.771, + 0.825, + 0.869 + ], + "angle": 0, + "content": "Target Group Distribution Extensive research has documented the influence of demographic factors on election outcomes [33, 53]. We model U.S. demographic and ideological diversity through integrated Census Bureau (2022 voting/registration) and ANES (2020) data [1]. This scenario incorporates 12 attributes from the user engine: socioeconomic (income, education, employment), geographic (region, area), and political (party, ideology) dimensions alongside demographic factors (age, gender, race, marital status, and religious status). Given available marginal distributions, we employ iterative proportional fitting (IPF) to synthesize agent populations, see Appendix C.1." 
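Since the passage above names iterative proportional fitting (IPF) as the way agent populations are synthesized from marginal distributions, a toy two-attribute sketch may help make the procedure concrete. This is not the Appendix C.1 implementation (which spans 12 attributes from Census and ANES marginals); the attribute names and counts below are invented purely for illustration.

```python
# Toy sketch of iterative proportional fitting (IPF) over two attributes. The paper's
# Appendix C.1 setup is not reproduced here; the marginals below are made up.
import numpy as np

def ipf_2d(row_marginals, col_marginals, n_iter=100, tol=1e-9):
    """Fit a joint table whose row/column sums match the given marginals."""
    table = np.ones((len(row_marginals), len(col_marginals)))
    for _ in range(n_iter):
        table *= (row_marginals / table.sum(axis=1))[:, None]   # rescale to match row totals
        table *= (col_marginals / table.sum(axis=0))[None, :]   # rescale to match column totals
        if np.allclose(table.sum(axis=1), row_marginals, atol=tol):
            break
    return table

# e.g., hypothetical age-group totals x party-identification totals
age_totals = np.array([300.0, 500.0, 200.0])
party_totals = np.array([450.0, 400.0, 150.0])
joint = ipf_2d(age_totals, party_totals)
print(joint.round(1))  # joint cell counts consistent with both marginals
```

The same alternating rescaling extends to higher-dimensional contingency tables by cycling through one attribute's marginals at a time.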
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Questionnaire Design We design the presidential election questionnaire based on abundant polls conducted by various media and research institutes [5, 24], incorporating both significant issues and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "voter preferences. These elements are then optimized into proper forms for LLM-based agents by the scenario engine. The entire questionnaire can be found in Appendix D.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.205 + ], + "angle": 0, + "content": "Evaluation Metric Two metrics are used to comprehensively compare the simulated election results to the real-world results. (1) Accuracy rate (Acc) is measured by calculating the proportion of states for which the election simulation results align with the actual result, serving as a coarse-grained evaluation metric. (2) Root Mean Square Error (RMSE) is measured by calculating the simulated vote share and the actual vote share for each state, which serves as a fine-grained evaluation metric." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.22, + 0.391, + 0.235 + ], + "angle": 0, + "content": "3.2 Breaking News Feedback" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.828, + 0.318 + ], + "angle": 0, + "content": "Task Description Journalism plays a crucial role in shaping public perception and opinion through agenda-setting, framing, and information dissemination [20, 55]. Online social media platforms have gradually replaced the influence of traditional paper media. When breaking news is released on social media platforms, its potential audience may hold different stances. We take the release of ChatGPT as our target news to evaluate the accuracy and foreseeability of public attitudes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.329, + 0.826, + 0.442 + ], + "angle": 0, + "content": "Target Group Distribution We define all Rednote users in our pool as the universal set, identifying technology-interested users as the potential audience set \\(\\mathbb{P}\\), and those discussing ChatGPT via keyword matching as the ground truth set \\(\\mathbb{G}\\), with \\(\\mathbb{G} \\subset \\mathbb{P} \\subset UserPool\\). Context is limited to pre-news timeframes to prevent leakage. Using the potential audience distribution as prior, we sample agents with identical distribution sampling (IDS) as \\(D_{s} = IDS(UserPool, \\mathbb{P})\\), see Appendix C.2), considering demographics (gender, age, education, and consumption level) during sampling the user pools. Based on this, the task is to compare the consistency between the agents' attitudes toward news and those of the users in the ground truth set." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.825, + 0.526 + ], + "angle": 0, + "content": "Questionnaire Design We design the cognitive questionnaire using the ABC attitude model (Affect, Behavior, Cognition) [28], which outlines attitude formation as a hierarchy: cognition affects emotions, guiding behavior. Combined with a 5-point Likert scale [22], the questionnaire covers six dimensions: public cognition (PC), perceived risks (PR), perceived benefits (PB), trust (TR), fairness (FA), and public acceptance (PA). See Appendix D.2 for details." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.537, + 0.825, + 0.609 + ], + "angle": 0, + "content": "Evaluation Metric Agents from both sets answer the questionnaire for paired responses. Two evaluation dimensions assess feedback: (1) Normalized RMSE (NRMSE) measures point-wise differences between simulated and ground truth answers across PC, PR, PB, TR, FA, and PA as value evaluations; (2) KL-divergence (KL-Div) compares the 6-dimensional answer distributions between groups as consistency evaluations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.623, + 0.465, + 0.639 + ], + "angle": 0, + "content": "3.3 National Economic Survey of China" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.648, + 0.826, + 0.748 + ], + "angle": 0, + "content": "Task Description Economic simulation is another crucial part of massive social simulations as it models resource distribution, market dynamics, and financial behaviors, providing insights into economic stability and policy impacts [13, 54]. By integrating economic factors with social interactions, it enhances the prediction of systemic outcomes, guiding decision-making in areas such as governance, urban planning, and crisis management. We follow a national economic survey conducted by the National Bureau of Statistics of China, which interviews Chinese citizens on their monthly spending given the average salary of each province in China." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.825, + 0.83 + ], + "angle": 0, + "content": "Target Group Distribution The prior distribution is based on the methodology from the National Bureau of Statistics of China, which takes 160,000 families nationwide and calculates their incomes and spending as the national average statistics [39]. We sample nationwide agents from our user pool proportionally according to their region population and generate their income distribution according to the regional average income [38]. The detailed method can be referred to in Appendix C.3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.829, + 0.915 + ], + "angle": 0, + "content": "Questionnaire Design Spending details in China Statistical Yearbook 2024 [40] are categorized into eight parts, i.e. food, clothing, housing, daily necessities & services, communication & transportation, education & entertainment, healthcare, and others. Consequently, the questionnaire design covers the above categories with examples and uses segmented interval options in each question. The entire questionnaire can be referred to in Appendix D.3." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.149 + ], + "angle": 0, + "content": "Evaluation Metric Both value evaluation and distribution evaluation are involved in the national economic survey as well. (1) NRMSE of the nine categories is measured between the simulated results and official statistics. (2) KL-Div is measured by taking the 8-item spending as a distribution to evaluate the consistency between the simulation and the real world." 
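Since the evaluation paragraphs of §3.2 and §3.3 both rely on NRMSE and KL-divergence, a minimal numpy sketch follows. These are assumed helpers rather than the authors' code, and the range-based normalization in `nrmse` is our own choice since the paper does not specify the normalization constant.

```python
# Minimal sketch of the two distribution-level metrics used in Sections 3.2/3.3.
# Assumed helpers; the normalization constant in nrmse (value range) is a guess.
import numpy as np

def nrmse(simulated, real):
    simulated, real = np.asarray(simulated, float), np.asarray(real, float)
    rmse = np.sqrt(np.mean((simulated - real) ** 2))
    return float(rmse / (real.max() - real.min()))  # normalize by observed range

def kl_divergence(p, q, eps=1e-12):
    # KL(p || q) between two discrete distributions, e.g. 8-item spending shares.
    p = np.asarray(p, float) + eps
    q = np.asarray(q, float) + eps
    p, q = p / p.sum(), q / q.sum()
    return float(np.sum(p * np.log(p / q)))
```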
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.169, + 0.269, + 0.184 + ], + "angle": 0, + "content": "4 Results" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.2, + 0.729, + 0.215 + ], + "angle": 0, + "content": "4.1 SocioVerse Can Support Diverse and Accurate Massive Social Simulations" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.233, + 0.825, + 0.297 + ], + "angle": 0, + "content": "
<tr><th>Scenario</th><th># Agents</th><th># Demographics</th><th>Type</th><th>Sampling</th><th>Source</th><th>Language</th><th># Questions</th><th>Ground truth</th></tr>
<tr><td>PresElectPredict</td><td>331,836</td><td>12</td><td>label</td><td>IPF</td><td>X</td><td>EN</td><td>49</td><td>real world</td></tr>
<tr><td>BreakNewsFeed</td><td>20,000</td><td>7</td><td>label</td><td>IDS</td><td>rednote</td><td>ZH</td><td>18</td><td>calculated</td></tr>
<tr><td>NatEconSurvey</td><td>16,000</td><td>9</td><td>label+number</td><td>IDS</td><td>rednote</td><td>ZH</td><td>17</td><td>real world</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.301, + 0.825, + 0.346 + ], + "angle": 0, + "content": "Table 2: Detail settings of three simulation scenarios, where PresElectPredict, BreakNewsFeed, and NatEconSurvey denote three simulations mentioned in the paper, respectively. IPF and IDS denote iterative proportional fitting and identical distribution sampling, see Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.363, + 0.827, + 0.406 + ], + "angle": 0, + "content": "Experiment Settings We select powerful LLMs from different model families. For open-sourced models, we select Llama-3-70b-Instruct [14], Qwen2.5-72b-Instruct [59], DeepSeek-R1-671b [19], and DeepSeek-V3 [27]. For commercial models, we select GPT-4o\\(^3\\) [41] and GPT-4o-mini\\(^4\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.826, + 0.509 + ], + "angle": 0, + "content": "We compare the settings of all three scenarios for better understanding, which is shown in Table 2. As the Presidential Election Prediction covers a 1-in-1,000 sample of the U.S. population, GPT-4o is excluded from comparison due to cost constraints. In terms of local model serving, Qwen2.5-72b-Instruct and Llama3-70b-Instruct models are both deployed on 8 NVIDIA RTX4090 GPUs via vLLM [25]. We set max tokens to 2,048 for all models to enable chain-of-thoughts during the generation and the temperature is set to 0.7 to encourage diversity. Implementation details for user pool construction and demographics annotation can be found in Appendix A and B." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.522, + 0.825, + 0.657 + ], + "angle": 0, + "content": "
<tr><th rowspan="3">Model</th><th colspan="4">PresElectPredict</th><th colspan="2" rowspan="2">BreakNewsFeed</th><th colspan="4">NatEconSurvey</th></tr>
<tr><th colspan="2">Overall</th><th colspan="2">Battleground</th><th colspan="2">Overall</th><th colspan="2">Developed-region</th></tr>
<tr><th>Acc↑</th><th>RMSE↓</th><th>Acc↑</th><th>RMSE↓</th><th>KL-Div↓</th><th>RMSE↓</th><th>KL-Div↓</th><th>RMSE↓</th><th>KL-Div↓</th><th>RMSE↓</th></tr>
<tr><td>Llama3-70b</td><td>0.843</td><td>0.064</td><td>0.733</td><td>0.045</td><td>0.668</td><td>0.199</td><td>0.016</td><td>0.026</td><td>0.013</td><td>0.025</td></tr>
<tr><td>Qwen2.5-72b</td><td>0.922</td><td>0.037</td><td>0.800</td><td>0.031</td><td>0.113</td><td>0.059</td><td>0.066</td><td>0.048</td><td>0.043</td><td>0.039</td></tr>
<tr><td>DeepSeek-R1-671b</td><td>\\</td><td>\\</td><td>0.670</td><td>0.065</td><td>0.383</td><td>0.082</td><td>0.059</td><td>0.045</td><td>0.045</td><td>0.036</td></tr>
<tr><td>DeepSeek-V3</td><td>0.922</td><td>0.046</td><td>0.867</td><td>0.041</td><td>0.263</td><td>0.072</td><td>0.035</td><td>0.036</td><td>0.023</td><td>0.030</td></tr>
<tr><td>GPT-4o-mini</td><td>\\</td><td>\\</td><td>0.800</td><td>0.039</td><td>0.195</td><td>0.114</td><td>0.046</td><td>0.045</td><td>0.030</td><td>0.036</td></tr>
<tr><td>GPT-4o</td><td>\\</td><td>\\</td><td>\\</td><td>\\</td><td>0.196</td><td>0.055</td><td>0.062</td><td>0.051</td><td>0.036</td><td>0.038</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.66, + 0.825, + 0.703 + ], + "angle": 0, + "content": "Table 3: Overall results of the three scenarios, where subset Battleground indicates battleground states in the U.S. in the presidential election and subset Developed-Region indicates top-10 developed regions in China in terms of GDP." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.722, + 0.825, + 0.751 + ], + "angle": 0, + "content": "Results The overall simulation results of the three scenarios are shown in Table 3. We also report subset results for presidential election prediction and national economic survey." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.761, + 0.827, + 0.873 + ], + "angle": 0, + "content": "- Presidential Election Prediction We report the overall results and the battleground states' results separately. The prediction of battleground states is challenging even in the real world and thus becomes the focus during the election process. According to the results, GPT-4o-mini and Qwen2.5-72b show competitive performance both in Acc and RMSE. Typically, according to the winner-takes-all rule, over \\(90\\%\\) state voting results are predicted correctly, which means the simulation achieves a high-precision macroscopic reduction of the real-world election results. After the case study, we find that DeepSeek-R1-671b sometimes falls into overthinking, resulting in less accurate results." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.884, + 0.335, + 0.898 + ], + "angle": 0, + "content": "3gpt-4o-2024-08-06" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.897, + 0.373, + 0.912 + ], + "angle": 0, + "content": "4gpt-4o-mini-2024-07-18" + }, + { + "type": "list", + "bbox": [ + 0.193, + 0.884, + 0.373, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.217, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "- Breaking News Feedback The results measure the overall consistency of each model compared with the real-world users' reactions and attitudes. To this end, the performances of GPT-4o and Qwen2.5-72b are more aligned with real-world perspectives than other models in terms of KL-Div and NRMSE, respectively, and the following detailed analysis will demonstrate that the models consistently capture and accurately predict public trends and opinions." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.181, + 0.827, + 0.267 + ], + "angle": 0, + "content": "- National Economic Survey We report the overall results and results for the top 10 regions by GDP (i.e., developed regions) separately. Generally, all the models closely align with real-world statistics. Llama3-70b shows a significant superiority over other models in the economic survey scenario and all the models perform better in the 1st-Region subset than overall. The results demonstrate that individuals' spending habits can be accurately reproduced under the SocioVerse framework, especially in developed regions." 
+ }, + { + "type": "list", + "bbox": [ + 0.217, + 0.092, + 0.827, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.276, + 0.828, + 0.348 + ], + "angle": 0, + "content": "The overall results from both value evaluation and distribution evaluation of three simulations sufficiently prove that SocioVerse can support diverse and accurate massive social simulations with a standard pipeline and minimal changes with human experts in the loop. However, the choice of underlying LLMs can affect simulation precision across different scenarios, highlighting the need for further study." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.363, + 0.807, + 0.394 + ], + "angle": 0, + "content": "4.2 Prior Distribution and Real-World Knowledge Can Enhance Simulation Accuracy in Presidential Election Predictions" + }, + { + "type": "table", + "bbox": [ + 0.337, + 0.409, + 0.663, + 0.574 + ], + "angle": 0, + "content": "
<tr><th>Model</th><th>Acc↑</th><th>RMSE↓</th></tr>
<tr><td>Llama3-70b</td><td>0.733</td><td>0.045</td></tr>
<tr><td>- w/o Knowledge</td><td>0.533</td><td>0.051</td></tr>
<tr><td>- w/o Knowledge & Prior Distribution</td><td>0.600</td><td>0.386</td></tr>
<tr><td>Qwen2.5-72b</td><td>0.800</td><td>0.031</td></tr>
<tr><td>- w/o Knowledge</td><td>0.800</td><td>0.033</td></tr>
<tr><td>- w/o Knowledge & Prior Distribution</td><td>0.600</td><td>0.370</td></tr>
<tr><td>GPT-4o-mini</td><td>0.800</td><td>0.039</td></tr>
<tr><td>- w/o Knowledge</td><td>0.800</td><td>0.052</td></tr>
<tr><td>- w/o Knowledge & Prior Distribution</td><td>0.667</td><td>0.323</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.579, + 0.825, + 0.622 + ], + "angle": 0, + "content": "Table 4: Ablation experiment results on the presidential election prediction simulation, where -w/o Knowledge denotes without real-world user knowledge and -w/o Piror Distribution denotes using random demographics distribution." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.633, + 0.827, + 0.731 + ], + "angle": 0, + "content": "We conduct an ablation study on the presidential election prediction simulation to assess the impact of prior demographics distribution and real-world user knowledge. As shown in Table 4, prior demographics distribution significantly improves the accuracy of the simulation in both Acc and RMSE compared to random demographics distribution. Additionally, past posts from users on social media platforms improve the fine-grained performance, especially for Llama3-70b in Acc and all the models in RMSE. We can tell from the ablation study that both prior distribution and real-world knowledge in the SocioVerse pipeline are significant during the simulation." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.748, + 0.817, + 0.765 + ], + "angle": 0, + "content": "4.3 Group Preference and Perspectives Can Be Well Reflected in Breaking News Feedback" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.827, + 0.913 + ], + "angle": 0, + "content": "During the Breaking News Feedback simulation, the core concern is whether the preferences and perspectives of the target group are well captured and reflected in the results. We reformulate the original questionnaire into the Likert 6-dimension scale ranging from 1 to 5 points, representing from totally disagree to totally agree. As the ground truth of the simulation is calculated by prompting LLM agents from the ground truth set, the simulated and real results are paired for each model, as shown in Figure 4. All the models powered by the potential audience set during the simulation tend to behave consistently with the ground truth users. However, Llama3-70b perform poorly with a larger gap between the simulated and real results than other models. GPT-4o-mini shows different attitudes in the fairness (FA) and public acceptance (PA) dimensions, which may be because the news is related to OpenAI. Another trend indicates that, generally, all the models perform more" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.176, + 0.089, + 0.825, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.198, + 0.828, + 0.242 + ], + "angle": 0, + "content": "Figure 4: An illustration of the performances of the breaking news feedback simulation, where PC, PR, PB, TR, FA, and PA denote six dimensions from the Likert scale (see §3.2 questionnaire design), with 1-point standing for totally disagree and 5-point for totally agree." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.269, + 0.825, + 0.299 + ], + "angle": 0, + "content": "disagreeably in the simulated results than the real results, which also underlines the potential risk of biases during the public opinion simulation." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.316, + 0.79, + 0.332 + ], + "angle": 0, + "content": "4.4 The Capabilities of LLMs Vary in Different Domains in National Economic Survey" + }, + { + "type": "table", + "bbox": [ + 0.24, + 0.35, + 0.758, + 0.476 + ], + "angle": 0, + "content": "
<tr><th>Item</th><th>Llama3-70b</th><th>Qwen2.5-72b</th><th>GPT-4o-mini</th><th>GPT-4o</th><th>DeepSeek-R1</th></tr>
<tr><td>Daily</td><td>0.007</td><td>0.009</td><td>0.006</td><td>0.010</td><td>0.009</td></tr>
<tr><td>Clothing</td><td>0.012</td><td>0.015</td><td>0.019</td><td>0.015</td><td>0.015</td></tr>
<tr><td>Transportation_Communication</td><td>0.016</td><td>0.020</td><td>0.027</td><td>0.023</td><td>0.017</td></tr>
<tr><td>Education_Entertainment</td><td>0.018</td><td>0.022</td><td>0.024</td><td>0.017</td><td>0.022</td></tr>
<tr><td>Medical</td><td>0.023</td><td>0.062</td><td>0.041</td><td>0.057</td><td>0.060</td></tr>
<tr><td>Food</td><td>0.037</td><td>0.031</td><td>0.031</td><td>0.040</td><td>0.032</td></tr>
<tr><td>Household</td><td>0.052</td><td>0.110</td><td>0.107</td><td>0.120</td><td>0.102</td></tr>
<tr><td>Others</td><td>0.008</td><td>0.008</td><td>0.010</td><td>0.005</td><td>0.009</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.48, + 0.825, + 0.522 + ], + "angle": 0, + "content": "Table 5: Detailed results on the national economic survey simulation reported in NRMSE, where the Item column indicates the components of spending. The best results are **bolded*; the second-best results are underlined." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.532, + 0.826, + 0.631 + ], + "angle": 0, + "content": "The simulation of the national economic survey covers 8 spending dimensions. The overall results in Table 3 show the average performance of these dimensions, while model performances among these dimensions can also vary. We calculate the averaged NRMSE of 31 regions on each spending level, as shown in Table 5. It is worth mentioning that all the models show high consistency. Eliminating the others item, all the models perform best on daily necessities spending planning and worst on housing spending, which can reveal the LLM's preference on the economic decision-making and highlight the challenge in housing spending strategy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.651, + 0.295, + 0.668 + ], + "angle": 0, + "content": "5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.827, + 0.795 + ], + "angle": 0, + "content": "In this study, we introduce a generalized social simulation framework SocioVerse and evaluated its performance across three distinct real-world scenarios. Our findings indicate that state-of-the-art LLMs demonstrate a notable ability to simulate human responses in complex social contexts, although some gaps still remain between the simulated response and observed real-world outcomes. Therefore, future research may need to incorporate a broader range of scenarios and develop more fine-grained evaluations built upon the current analytic engine, to further explore and expand the boundaries of LLMs' simulation capabilities. Such efforts could pave the way for establishing LLMs as comprehensive and reliable tools for large-scale social simulation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "We observed several key patterns across the simulations of the scenarios. First, incorporating demographic distributions and users' historical experiences significantly improved simulation accuracy. These findings highlight the importance of building a large, demographically rich user pool, complemented by a multi-dimensional user tagging system for more precise modeling of group-specific behaviors. Second, under consistent measurement protocols, LLMs produced broadly similar simulations of human attitudes and ideologies. However, certain models, such as GPT-4o-mini, showed notable inconsistencies, indicating that model-specific preferences or biases remain influential and warrant closer scrutiny in future work. Finally, we found that while LLMs perform well in simple daily" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.175, + 0.092, + 0.825, + 0.12 + ], + "angle": 0, + "content": "scenarios, they underperform in complex situations requiring contextual knowledge, underscoring the need to align model behavior with real-world experiences and social contexts." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.126, + 0.827, + 0.265 + ], + "angle": 0, + "content": "Notably, the current version has only implemented part of our framework, indicating significant potential for enhancing the accuracy and quality of social simulations. Future work can focus on refining each module for better collaboration, enabling the framework to achieve its full potential. For instance, the incorporation of the social environment can inject up-to-date knowledge into LLMs, enhancing the understanding of social dynamics. The scenario engine can not only provide survey-based simulation but also expand to diverse formats such as social interviews and free interactions. Additionally, further optimization of the general LLMs and expert LLMs adaptation in the behavior engine will enable better accommodation of complex target user groups, such as minority groups and individuals with special disabilities. The analysis engine can introduce an autonomous planning module to improve the overall credibility of simulation results." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.271, + 0.827, + 0.396 + ], + "angle": 0, + "content": "Beyond the social simulation framework, our work underscores the potential to bridge the gap between autonomous AI systems and traditional social science, offering social scientists a seamless, cost-effective tool for conducting social experiments with minimal setup. Such tools not only help analyze and validate psychological and sociological theories or hypotheses, such as behavioral economics and social identity theory, but also assist in predicting large-scale social impacts like policy changes, social movements, or public health crises. By providing an efficient and scalable simulation environment, our framework is not just a research tool, but an experimental platform for exploring the dynamic changes and long-term trends of virtual societies, with the aim of becoming a realistic mapping for real-world societies." + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.415, + 0.331, + 0.433 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.446, + 0.825, + 0.489 + ], + "angle": 0, + "content": "We would like to express our sincere gratitude to Professor Rongwei Chu and his research team for their invaluable support in this work. The project's computational resources are supported by the CFFF platform of Fudan University." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.507, + 0.947 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.113, + 0.826, + 0.14 + ], + "angle": 0, + "content": "[1] American National Election Studies. Anes 2020 time series study full release [dataset and documentation], 2021. February 10, 2022 version." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.149, + 0.826, + 0.186 + ], + "angle": 0, + "content": "[2] J. R. Anthis, R. Liu, S. M. Richardson, A. C. Kozlowski, B. Koch, J. Evans, E. Brynjolfsson, and M. Bernstein. LIm social simulations are a promising research method. arXiv preprint arXiv:2504.02234, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.198, + 0.825, + 0.224 + ], + "angle": 0, + "content": "[3] L. P. Argyle, E. C. Busby, N. Fulda, J. R. Gubler, C. Rytting, and D. Wingate. Out of one, many: Using language models to simulate human samples. 
Political Analysis, 31(3):337-351, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.233, + 0.825, + 0.272 + ], + "angle": 0, + "content": "[4] Z. Bao, Q. Liu, Y. Guo, Z. Ye, J. Shen, S. Xie, J. Peng, X. Huang, and Z. Wei. Piers: Personalized intelligent outpatient reception based on large language model with multi-agents medical scenario simulation. arXiv preprint arXiv:2411.13902, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.282, + 0.825, + 0.308 + ], + "angle": 0, + "content": "[5] A. Barnett and A. Sarfati. The polls and the us presidential election in 2020.... and 2024. Statistics and Public Policy, 10(1):2199809, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.318, + 0.826, + 0.345 + ], + "angle": 0, + "content": "[6] L. M. Bartels. Uninformed votes: Information effects in presidential elections. American journal of political science, pages 194-230, 1996." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.355, + 0.825, + 0.38 + ], + "angle": 0, + "content": "[7] I. Beltagy, M. E. Peters, and A. Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.39, + 0.825, + 0.417 + ], + "angle": 0, + "content": "[8] A. K. Chandra, D. C. Kozen, and L. J. Stockmeyer. Alternation. Journal of the Association for Computing Machinery, 28(1):114-133, 1981." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.426, + 0.825, + 0.453 + ], + "angle": 0, + "content": "[9] Y.-S. Chuang and T. T. Rogers. Computational agent-based models in opinion dynamics: A survey on social simulations and empirical studies. arXiv preprint arXiv:2306.03446, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.462, + 0.825, + 0.501 + ], + "angle": 0, + "content": "[10] V. Cologna, N. G. Mede, S. Berger, J. Besley, C. Brick, M. Joubert, E. W. Maibach, S. Mihelj, N. Oreskes, M. S. Schäfer, et al. Trust in scientists and their role in society across 68 countries. Nature Human Behaviour, pages 1–18, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.51, + 0.528, + 0.524 + ], + "angle": 0, + "content": "[11] T. Connolly. Micromotives and macrobehavior., 1979." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.535, + 0.825, + 0.561 + ], + "angle": 0, + "content": "[12] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.57, + 0.825, + 0.609 + ], + "angle": 0, + "content": "[13] F. Dignum, V. Dignum, P. Davidsson, A. Ghorbani, M. van der Hurk, M. Jensen, C. Kammler, F. Lorig, L. G. Ludescher, A. Melchior, et al. Analysing the combined health, social and economic impacts of the coronavirus pandemic using agent-based social simulation. *Minds and Machines*, 30:177–194, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.619, + 0.825, + 0.646 + ], + "angle": 0, + "content": "[14] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.655, + 0.825, + 0.693 + ], + "angle": 0, + "content": "[15] C. Gao, X. Lan, N. Li, Y. Yuan, J. Ding, Z. Zhou, F. Xu, and Y. Li. Large language models empowered agent-based modeling and simulation: A survey and perspectives. 
*Humanities and Social Sciences Communications*, 11(1):1-24, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.703, + 0.825, + 0.73 + ], + "angle": 0, + "content": "[16] C. Gao, X. Lan, Z. Lu, J. Mao, J. Piao, H. Wang, D. Jin, and Y. Li. S3: Social-network simulation system with large language model-empowered agents. arXiv preprint arXiv:2307.14984, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.74, + 0.825, + 0.779 + ], + "angle": 0, + "content": "[17] S. Giorgi, V. E. Lynn, K. Gupta, F. Ahmed, S. Matz, L. H. Ungar, and H. A. Schwartz. Correcting sociodemographic selection biases for population prediction from social media. In Proceedings of the International AAAI Conference on Web and Social Media, volume 16, pages 228-240, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.788, + 0.825, + 0.826 + ], + "angle": 0, + "content": "[18] B. E. GOLDSMITH, Y. HORIUCHI, and K. MATUSH. Does public diplomacy sway foreign public opinion? identifying the effect of high-level visits. American Political Science Review, 115(4):1342-1357, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.836, + 0.825, + 0.875 + ], + "angle": 0, + "content": "[19] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.885, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[20] B. Gómez-Calderón and Y. Ceballos. Journalism and artificial intelligence: the treatment of the chatbots in the Spanish press. index.comunicación, 14(1):281–300, Jan. 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.113, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "[21] J. C. Jackson, D. Rand, K. Lewis, M. I. Norton, and K. Gray. Agent-based modeling: A guide for social psychologists. Social Psychological and Personality Science, 8(4):387-395, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.128, + 0.827, + 0.157 + ], + "angle": 0, + "content": "[22] A. Joshi, S. Kale, S. Chandel, and D. K. Pal. Likert scale: Explored and explained. British journal of applied science & technology, 7(4):396-403, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.165, + 0.827, + 0.192 + ], + "angle": 0, + "content": "[23] M. Jusup, P. Holme, K. Kanazawa, M. Takayasu, I. Romić, Z. Wang, S. Geček, T. Lipić, B. Podobnik, L. Wang, et al. Social physics. Physics Reports, 948:1-148, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.201, + 0.825, + 0.229 + ], + "angle": 0, + "content": "[24] S. Keeter, N. Hatley, A. Lau, and C. Kennedy. What 2020's election poll errors tell us about the accuracy of issue polling. Pew Research Center Methods, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.238, + 0.825, + 0.278 + ], + "angle": 0, + "content": "[25] W. Kwon, Z. Li, S. Zhuang, Y. Sheng, L. Zheng, C. H. Yu, J. E. Gonzalez, H. Zhang, and I. Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.287, + 0.827, + 0.327 + ], + "angle": 0, + "content": "[26] S. Lee, T.-Q. Peng, M. H. Goldberg, S. A. Rosenthal, J. E. Kotcher, E. W. Maibach, and A. Leiserowitz. Can large language models capture public opinion about global warming? an empirical assessment of algorithmic fidelity and bias. arXiv preprint arXiv:2311.00217, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.336, + 0.825, + 0.364 + ], + "angle": 0, + "content": "[27] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.372, + 0.825, + 0.401 + ], + "angle": 0, + "content": "[28] B. Liu, Y. Xu, Y. Yang, and S. Lu. How public cognition influences public acceptance of ccus in china: Based on the abc (affect, behavior, and cognition) model of attitudes. Energy Policy, 156:112390, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.409, + 0.825, + 0.449 + ], + "angle": 0, + "content": "[29] X. Liu, S. Yang, X. Zhang, H. Kuang, L. Sun, Y. Yang, S. Chen, X. Huang, and Z. Wei. Ai-press: A multi-agent news generating and feedback simulation system powered by large language models. arXiv preprint arXiv:2410.07561, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.459, + 0.825, + 0.485 + ], + "angle": 0, + "content": "[30] Y. Liu, X. Chen, X. Zhang, X. Gao, J. Zhang, and R. Yan. From skepticism to acceptance: Simulating the attitude dynamics toward fake news. arXiv preprint arXiv:2403.09498, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.495, + 0.827, + 0.534 + ], + "angle": 0, + "content": "[31] H. Lyu, S. Jiang, H. Zeng, Y. Xia, Q. Wang, S. Zhang, R. Chen, C. Leung, J. Tang, and J. Luo. Llm-rec: Personalized recommendation via prompting large language models. arXiv preprint arXiv:2307.15780, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.544, + 0.825, + 0.572 + ], + "angle": 0, + "content": "[32] C. M. Macal and M. J. North. Agent-based modeling and simulation. In Proceedings of the 2009 winter simulation conference (WSC), pages 86-98. IEEE, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.58, + 0.827, + 0.62 + ], + "angle": 0, + "content": "[33] B. Major, A. Blodorn, and G. Major Blascovich. The threat of increasing diversity: Why many white americans support trump in the 2016 presidential election. Group Processes & Intergroup Relations, 21(6):931-940, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.629, + 0.825, + 0.67 + ], + "angle": 0, + "content": "[34] X. Mou, X. Ding, Q. He, L. Wang, J. Liang, X. Zhang, L. Sun, J. Lin, J. Zhou, X. Huang, et al. From individual to society: A survey on social simulation driven by large language model-based agents. arXiv preprint arXiv:2412.03563, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.678, + 0.825, + 0.718 + ], + "angle": 0, + "content": "[35] X. Mou, Z. Li, H. Lyu, J. Luo, and Z. Wei. Unifying local and global knowledge: Empowering large language models as political experts with knowledge graphs. In Proceedings of the ACM Web Conference 2024, pages 2603–2614, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.727, + 0.827, + 0.755 + ], + "angle": 0, + "content": "[36] X. Mou, J. Liang, J. Lin, X. Zhang, X. Liu, S. Yang, R. Ye, L. Chen, H. Kuang, X. Huang, and Z. Wei. 
Agentsense: Benchmarking social intelligence of language agents through interactive scenarios, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.764, + 0.825, + 0.792 + ], + "angle": 0, + "content": "[37] X. Mou, Z. Wei, and X. Huang. Unveiling the truth and facilitating change: Towards agent-based large-scale social movement simulation. arXiv preprint arXiv:2402.16333, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.801, + 0.827, + 0.828 + ], + "angle": 0, + "content": "[38] NBS China. Communiqué of the Seventh National Population Census of the People's Republic of China. Technical report, 2023. Accessed: 2025-02-14." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.837, + 0.825, + 0.864 + ], + "angle": 0, + "content": "[39] NBS China. Explanatory Notes on Main Statistical Indicators – Population, Society, and Labor (China Statistical Yearbook 2023), 2023. Accessed: 2025-02-14." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.657, + 0.888 + ], + "angle": 0, + "content": "[40] NBS China. China Statistical Yearbook 2024, 2024. Accessed: 2025-02-14." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.898, + 0.676, + 0.913 + ], + "angle": 0, + "content": "[41] OpenAI. GPT-4o System Card. Technical report, 2024. Accessed: 2025-02-14." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[42] J. S. Park, J. O'Brien, C. J. Cai, M. R. Morris, P. Liang, and M. S. Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.139, + 0.827, + 0.166 + ], + "angle": 0, + "content": "[43] J. S. Park, C. Q. Zou, A. Shaw, B. M. Hill, C. Cai, M. R. Morris, R. Willer, P. Liang, and M. S. Bernstein. Generative agent simulations of 1,000 people. arXiv preprint arXiv:2411.10109, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.173, + 0.827, + 0.2 + ], + "angle": 0, + "content": "[44] L. Peisakhin, N. Stoop, and P. Van der Windt. Who hosts? the correlates of hosting the internally displaced. American Political Science Review, pages 1-16, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.206, + 0.827, + 0.246 + ], + "angle": 0, + "content": "[45] F. Ribeiro, L. Henrique, F. Benevenuto, A. Chakraborty, J. Kulshrestha, M. Babaei, and K. Gummadi. Media bias monitor: Quantifying biases of social media news outlets at large-scale. In Proceedings of the International AAAI Conference on Web and Social Media, volume 12, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.253, + 0.553, + 0.267 + ], + "angle": 0, + "content": "[46] S. J. Rosenstone. Forecasting presidential elections. 1981." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.274, + 0.765, + 0.289 + ], + "angle": 0, + "content": "[47] T. C. Schelling. Models of segregation. The American economic review, 59(2):488-493, 1969." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.295, + 0.826, + 0.31 + ], + "angle": 0, + "content": "[48] T. C. Schelling. Dynamic models of segregation. Journal of mathematical sociology, 1(2):143-186, 1971." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.316, + 0.825, + 0.344 + ], + "angle": 0, + "content": "[49] Y. Shao, L. Li, J. Dai, and X. Qiu. Character-llm: A trainable agent for role-playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 13153–13187, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.35, + 0.825, + 0.377 + ], + "angle": 0, + "content": "[50] E. R. Smith and F. R. Conrey. Agent-based modeling: A new approach for theory building in social psychology. *Personality and social psychology review*, 11(1):87-104, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.383, + 0.825, + 0.41 + ], + "angle": 0, + "content": "[51] L. Sun, S. Wang, X. Huang, and Z. Wei. Identity-driven hierarchical role-playing agents. arXiv preprint arXiv:2407.19412, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.417, + 0.744, + 0.432 + ], + "angle": 0, + "content": "[52] S. Tang. Idea, action, and outcome. Innovation in the Social Sciences, 2(2):123-170, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.438, + 0.827, + 0.464 + ], + "angle": 0, + "content": "[53] R. A. Teixeira. Red, blue, and purple America: the future of election demographics. Rowman & Littlefield, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.472, + 0.826, + 0.5 + ], + "angle": 0, + "content": "[54] T. Trimborn, P. Otte, S. Cramer, M. Beikirch, E. Pabich, and M. Frank. Subcemm: A simulator for agent-based computational economic market models. Computational economics, 55(2):707-744, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.506, + 0.825, + 0.534 + ], + "angle": 0, + "content": "[55] A. van Dalen. Revisiting the algorithms behind the headlines. how journalists respond to professional competition of generative ai. Journalism Practice, pages 1-18, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.54, + 0.825, + 0.567 + ], + "angle": 0, + "content": "[56] L. Wang, J. Zhang, H. Yang, Z. Chen, J. Tang, Z. Zhang, X. Chen, Y. Lin, R. Song, W. X. Zhao, et al. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.573, + 0.827, + 0.625 + ], + "angle": 0, + "content": "[57] K. Wu, X. Mou, L. Xue, Z. Ying, W. Wang, Q. Zhang, X.-J. Huang, and Z. Wei. Pasum: A pre-training architecture for social media user modeling based on text graph. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 12644-12656, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.633, + 0.827, + 0.671 + ], + "angle": 0, + "content": "[58] B. Xiao, Z. Yin, and Z. Shan. Simulating public administration crisis: A novel generative agent-based simulation system to lower technology barriers in social science research. arXiv preprint arXiv:2311.06957, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.679, + 0.825, + 0.706 + ], + "angle": 0, + "content": "[59] A. Yang, B. Yang, B. Zhang, B. Hui, B. Zheng, B. Yu, C. Li, D. Liu, F. Huang, H. Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.713, + 0.827, + 0.74 + ], + "angle": 0, + "content": "[60] Z. Yang, Z. Zhang, Z. Zheng, Y. Jiang, Z. Gan, Z. Wang, Z. Ling, J. Chen, M. Ma, B. Dong, et al. Oasis: Open agents social interaction simulations on one million agents. 
arXiv preprint arXiv:2411.11581, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.746, + 0.825, + 0.773 + ], + "angle": 0, + "content": "[61] R. Ye, Y. Zhang, Y. Zhang, H. Kuang, Z. Wei, and P. Sun. Multi-agent kto: Reinforcing strategic interactions of large language model in language game. arXiv preprint arXiv:2501.14225, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.78, + 0.825, + 0.807 + ], + "angle": 0, + "content": "[62] S. Yue, S. Wang, W. Chen, X. Huang, and Z. Wei. Synergistic multi-agent framework with trajectory learning for knowledge-intensive tasks. arXiv preprint arXiv:2407.09893, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.814, + 0.827, + 0.866 + ], + "angle": 0, + "content": "[63] X. Zhang, H. Kuang, X. Mou, H. Lyu, K. Wu, S. Chen, J. Luo, X. Huang, and Z. Wei. SoMeLVLM: A large vision language model for social media processing. In L.-W. Ku, A. Martins, and V. Srikumar, editors, Findings of the Association for Computational Linguistics ACL 2024, pages 2366-2389, Bangkok, Thailand and virtual meeting, Aug. 2024. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[64] X. Zhang, J. Lin, L. Sun, W. Qi, Y. Yang, Y. Chen, H. Lyu, X. Mou, S. Chen, J. Luo, et al. Electionsim: Massive population election simulation powered by large language model driven agents. arXiv preprint arXiv:2410.20746, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.395, + 0.108 + ], + "angle": 0, + "content": "A Data Cleaning Details" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.121, + 0.39, + 0.135 + ], + "angle": 0, + "content": "A.1 Content Data Extraction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.146, + 0.825, + 0.175 + ], + "angle": 0, + "content": "We extract only post-related content on all the social media platforms to avoid violating privacy policies. Specifically, the data list on each platform is shown in Table 6." + }, + { + "type": "table", + "bbox": [ + 0.338, + 0.186, + 0.66, + 0.264 + ], + "angle": 0, + "content": "
<tr><th>Platform</th><th>Data list</th></tr>
<tr><td>X</td><td>user ID, tweet, #likes, #comments, #retweets</td></tr>
<tr><td>Rednote</td><td>user ID, notes, #likes, #comments</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.25, + 0.268, + 0.745, + 0.284 + ], + "angle": 0, + "content": "Table 6: Data list for each social media platform during the data collection." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.301, + 0.394, + 0.317 + ], + "angle": 0, + "content": "A.2 Abnormal Data Filtering" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.828, + 0.385 + ], + "angle": 0, + "content": "We filter the abnormal data to guarantee the quality through text similarity calculation. Typically, all the textual content from the same user is calculated by means of the word repetition ratio. The threshold is set to 0.3. If the ratio surpasses the threshold, the user is considered likely to be a robot or advertising and will be filtered." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.495, + 0.108 + ], + "angle": 0, + "content": "B Demographics Annotation System" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.12, + 0.338, + 0.135 + ], + "angle": 0, + "content": "B.1 LLM Annotation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.146, + 0.828, + 0.217 + ], + "angle": 0, + "content": "To save costs, we first sample a subset of the user pool and employ multiple power LLMs for annotation. Due to the long time span of this work, users from different data sources in the user pool have used the powerful LLMs available at the time. For users derived from the X, GPT-4o\\(^5\\), Claude3.5-Sonnet\\(^6\\), and Gemini-1.5\\(^7\\) are employed. For users derived from the Rednote, GPT-4o, Cluade3.5-Sonnet, and Qwen2.5-72b are employed." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.232, + 0.35, + 0.246 + ], + "angle": 0, + "content": "B.2 Human Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.825, + 0.313 + ], + "angle": 0, + "content": "We employ 7 professional human annotators to verify the results annotated by LLMs. Typically, each annotator is required to re-associate the demographic factors without the LLM labels. All the data are verified by at least 2 human annotators. The overall consistency between humans and LLMs is shown in Table 7." + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.322, + 0.694, + 0.439 + ], + "angle": 0, + "content": "
<tr><th>Models</th><th>Human (X)</th><th>Human (Rednote)</th></tr>
<tr><td>GPT-4o</td><td>0.905</td><td>0.723</td></tr>
<tr><td>Claude3.5</td><td>0.901</td><td>0.659</td></tr>
<tr><td>Gemini-1.5</td><td>0.713</td><td>\\</td></tr>
<tr><td>Qwen2.5</td><td>\\</td><td>0.846</td></tr>
<tr><td>Majority votes</td><td>0.956</td><td>0.849</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.443, + 0.825, + 0.471 + ], + "angle": 0, + "content": "Table 7: Human annotators' verification results. We report the consistency between humans and different LLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.492, + 0.347, + 0.508 + ], + "angle": 0, + "content": "B.3 Classifier Training" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.517, + 0.825, + 0.573 + ], + "angle": 0, + "content": "We take the majority-voted labels from different LLMs to construct the training dataset. Considering the difference in mainstream language used on different platforms, we employ LongFormer [7] for X data and employ Bert-base-chinese [12] for Rednote. The implementation details are shown in Table 8." + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.583, + 0.693, + 0.748 + ], + "angle": 0, + "content": "
<tr><th>Params</th><th>LongFormer</th><th>Bert-base-chinese</th></tr>
<tr><td>train_size</td><td>10,000</td><td>10,000</td></tr>
<tr><td># classifiers</td><td>5</td><td>4</td></tr>
<tr><td>max_tokens</td><td>4096</td><td>512</td></tr>
<tr><td>learning_rate</td><td>5e-5</td><td>5e-5</td></tr>
<tr><td>batch_size</td><td>16</td><td>32</td></tr>
<tr><td>optimizer</td><td>AdamW</td><td>AdamW</td></tr>
<tr><td>epochs</td><td>3</td><td>10</td></tr>
<tr><td>device</td><td>8*4090</td><td>2*4090</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.3, + 0.752, + 0.696, + 0.767 + ], + "angle": 0, + "content": "Table 8: Implementation details for demographic classifiers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.791, + 0.792 + ], + "angle": 0, + "content": "We report the performances of demographic classifiers on each demographic factor in Table 9." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.808, + 0.473, + 0.822 + ], + "angle": 0, + "content": "B.4 Overall Distribution of the User Pool" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.833, + 0.825, + 0.862 + ], + "angle": 0, + "content": "We employ the demographic classifiers to annotate all of the users in the user pool, and the overall distributions are shown in Figure 5. For other demographics in specific simulations that are not" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.87, + 0.335, + 0.884 + ], + "angle": 0, + "content": "5 gpt-4o-2024-08-06" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.884, + 0.403, + 0.897 + ], + "angle": 0, + "content": "6claude-3-5-sonnet-20240620" + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.897, + 0.292, + 0.912 + ], + "angle": 0, + "content": "7 gemini-1.5-pro" + }, + { + "type": "list", + "bbox": [ + 0.193, + 0.87, + 0.403, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.306, + 0.089, + 0.691, + 0.243 + ], + "angle": 0, + "content": "
<tr><th rowspan="2">Demos</th><th colspan="2">LongFormer</th><th colspan="2">Bert-base-chinese</th></tr>
<tr><th>Acc</th><th>F1</th><th>Acc</th><th>F1</th></tr>
<tr><td>Gender</td><td>0.875</td><td>0.904</td><td>0.926</td><td>0.958</td></tr>
<tr><td>Age</td><td>0.902</td><td>0.873</td><td>0.925</td><td>0.920</td></tr>
<tr><td>Party</td><td>0.849</td><td>0.846</td><td>\\</td><td>\\</td></tr>
<tr><td>Ideology</td><td>0.810</td><td>0.807</td><td>\\</td><td>\\</td></tr>
<tr><td>Race</td><td>0.779</td><td>0.768</td><td>\\</td><td>\\</td></tr>
<tr><td>Consumption</td><td>\\</td><td>\\</td><td>0.749</td><td>0.748</td></tr>
<tr><td>Education</td><td>\\</td><td>\\</td><td>0.954</td><td>0.975</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.301, + 0.248, + 0.695, + 0.264 + ], + "angle": 0, + "content": "Table 9: Performance of demographic classifiers on test set." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.825, + 0.311 + ], + "angle": 0, + "content": "considered in prior distribution, only users from the sampled user pool are annotated by the majority votes of LLMs." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.324, + 0.489, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.325, + 0.816, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.284, + 0.53, + 0.713, + 0.546 + ], + "angle": 0, + "content": "Figure 5: Demographic distribution on X and Rednote user pool." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.579, + 0.108 + ], + "angle": 0, + "content": "C Demographic Distribution Sampling Details" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.12, + 0.423, + 0.136 + ], + "angle": 0, + "content": "C.1 Iterative Proportional Fitting" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.146, + 0.828, + 0.203 + ], + "angle": 0, + "content": "In our study, we follow the classical IPF method to construct the joint distribution of all the attributes in our simulation. Specifically, we start with a two-way table with individual components denoted as \\( x_{ij} \\) and targeted estimation \\( \\hat{x}_{ij} \\). The targeted estimation \\( \\hat{x}_{ij} \\) satisfies \\( \\Sigma_j\\hat{x}_{ij} = v_i \\) and \\( \\Sigma_i\\hat{x}_{ij} = w_j \\). The iterations are specified as follows:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.212, + 0.355, + 0.233 + ], + "angle": 0, + "content": "Let \\(\\hat{x}_{ij}^{(0)} = x_{ij}\\). For \\(\\alpha > 1\\):" + }, + { + "type": "equation", + "bbox": [ + 0.413, + 0.24, + 0.826, + 0.284 + ], + "angle": 0, + "content": "\\[\n\\hat {x} _ {i j} ^ {(2 \\alpha - 1)} = \\frac {\\hat {x} _ {i j} ^ {(2 \\alpha - 2)} v _ {i}}{\\sum_ {k = 1} ^ {J} \\hat {x} _ {i j} ^ {(2 \\alpha - 2)}} \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.422, + 0.287, + 0.826, + 0.332 + ], + "angle": 0, + "content": "\\[\n\\hat {x} _ {i j} ^ {(2 \\alpha)} = \\frac {\\hat {x} _ {i j} ^ {(2 \\alpha - 1)} w _ {j}}{\\Sigma_ {k = 1} ^ {I} \\hat {x} _ {i j} ^ {(2 \\alpha - 1)}} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.825, + 0.369 + ], + "angle": 0, + "content": "The iterations end when the estimated marginals are sufficiently close to the real marginals or when they stabilize without further convergence." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.826, + 0.458 + ], + "angle": 0, + "content": "For the presidential election simulation, we implement the IPF algorithm for each state using five attributes: gender, race, age group, ideology, and partisanship. In most cases, the algorithm does not converge, but the gaps between the estimated and actual marginals are less than \\(5\\%\\), with 888 out of 918 marginals falling within this range. For the outliers, since IPF adjusts proportionally to the marginals, the overall ratio of marginals remains consistent. We then use the estimated joint distribution and marginals for our massive simulation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.473, + 0.44, + 0.489 + ], + "angle": 0, + "content": "C.2 Identical Distribution Sampling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.498, + 0.825, + 0.542 + ], + "angle": 0, + "content": "Identical distribution sampling, also known as direct sampling, is applied when the joint distribution of multiple demographics is available. Given feature \\( X \\) and \\( Y \\), the joint distribution can be formulated as \\( p(X,Y) \\). Then, identical distribution sampling can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.379, + 0.544, + 0.826, + 0.561 + ], + "angle": 0, + "content": "\\[\n\\left(X _ {i}, Y _ {i}\\right) \\sim p (X, Y) \\quad i = 1, 2, \\dots , n \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.571, + 0.827, + 0.614 + ], + "angle": 0, + "content": "For breaking news feedback simulations, as the ground truth set is directly from the Rednote, we can obtain all the users' demographics and calculate the joint distribution. Simultaneously, the scale of the user pool satisfies the direct sampling requirements." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.629, + 0.551, + 0.644 + ], + "angle": 0, + "content": "C.3 Prior Distribution of National Economic Survey" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.654, + 0.827, + 0.752 + ], + "angle": 0, + "content": "For the national economic survey distribution, only average income is available from the official data. As a result, we generate the prior income distribution at the regional level. The income distribution across different regions exhibits significant heterogeneity, often characterized by a right-skewed pattern. To model this distribution, we adopt a mixture distribution approach, combining a lognormal distribution for the majority of the population with a Pareto distribution for the high-income segment. This hybrid model captures both the bulk of wage earners and the long-tail effect observed in high-income groups." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.757, + 0.825, + 0.787 + ], + "angle": 0, + "content": "Formally, let \\( X \\) denote an individual's wage. 
We assume that for the lower and middle-income groups \\( (X < x_{\\min}) \\), incomes follow a log-normal distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.413, + 0.798, + 0.826, + 0.817 + ], + "angle": 0, + "content": "\\[\nX \\sim \\log \\text {N o r m a l} (\\mu , \\sigma^ {2}) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.82, + 0.218, + 0.832 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.831, + 0.826, + 0.868 + ], + "angle": 0, + "content": "\\[\n\\mu = \\ln \\left(\\frac {\\mu_ {\\text {a c t u a l}} ^ {2}}{\\sqrt {\\sigma_ {\\text {a c t u a l}} ^ {2} + \\mu_ {\\text {a c t u a l}} ^ {2}}}\\right), \\quad \\sigma = \\sqrt {\\ln \\left(1 + \\frac {\\sigma_ {\\text {a c t u a l}} ^ {2}}{\\mu_ {\\text {a c t u a l}} ^ {2}}\\right)} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.877, + 0.676, + 0.893 + ], + "angle": 0, + "content": "For the high-income group \\((X\\geq x_{min})\\) , wages follow a Pareto distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.897, + 0.826, + 0.914 + ], + "angle": 0, + "content": "\\[\nP (X \\geq x) = C x ^ {- \\alpha}, \\quad x \\geq x _ {\\min } \\tag {6}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.175, + 0.093, + 0.825, + 0.188 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is the Pareto shape parameter determining the income concentration at the top. The proportion of individuals assigned to each distribution is governed by an empirical threshold ratio, typically set such that \\(90\\%\\) of the population follows the log-normal distribution while \\(10\\%\\) follows the Pareto distribution. This mixture approach provides a flexible yet robust framework for simulating realistic income distributions across diverse economic conditions. We set all the parameters empirically according to previous research and generate the income distribution for 31 regions in China (Hong Kong, Macao, and Taiwan are excluded)." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.456, + 0.109 + ], + "angle": 0, + "content": "D Questionnaire Design Details" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.121, + 0.57, + 0.137 + ], + "angle": 0, + "content": "We provide the questionnaires here for all three simulations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.152, + 0.566, + 0.167 + ], + "angle": 0, + "content": "D.1 Questionnaire for Presidential Election Prediction" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.184, + 0.859, + 0.901 + ], + "angle": 0, + "content": "
<tr><td>Q01</td><td>Voting Behavior</td></tr>
<tr><td>Question</td><td>ORDER OF MAJOR PARTY CANDIDATE NAMES</td></tr>
<tr><td>Value Labels</td><td>1. Democrat first / Republican second\n2. Republican first / Democrat second</td></tr>
<tr><td>Q02</td><td>Social Security</td></tr>
<tr><td>Question</td><td>Next I am going to read you a list of federal programs. For each one, I would like you to tell me whether you would like to see spending increased, decreased, or kept the same.\nWhat about Social Security? Should federal spending on Social Security be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q03</td><td>Education</td></tr>
<tr><td>Question</td><td>What about public schools? Should federal spending on public schools be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q04</td><td>Immigration</td></tr>
<tr><td>Question</td><td>What about tightening border security to prevent illegal immigration? Should federal spending on tightening border security to prevent illegal immigration be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q05</td><td>Criminal Justice</td></tr>
<tr><td>Question</td><td>What about dealing with crime? Should federal spending on dealing with crime be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q06</td><td>Social Welfare</td></tr>
<tr><td>Question</td><td>What about welfare programs? Should federal spending on welfare programs be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q07</td><td>Infrastructure</td></tr>
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.088, + 0.857, + 0.861 + ], + "angle": 0, + "content": "
<tr><td>Question</td><td>What about building and repairing highways? Should federal spending on building and repairing highways be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q08</td><td>Aid to Poor</td></tr>
<tr><td>Question</td><td>What about aid to the poor? Should federal spending on aid to the poor be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q09</td><td>Environment</td></tr>
<tr><td>Question</td><td>What about protecting the environment? Should federal spending on protecting the environment be increased, decreased, or kept the same?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same</td></tr>
<tr><td>Q10</td><td>Government</td></tr>
<tr><td>Question</td><td>How much do you feel that having elections makes the government pay attention to what the people think?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. A good deal\n2. Some\n3. Not much</td></tr>
<tr><td>Q11</td><td>Economy</td></tr>
<tr><td>Question</td><td>Which party do you think would do a better job of handling the nation's economy?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job</td></tr>
<tr><td>Q12</td><td>Health Care</td></tr>
<tr><td>Question</td><td>Which party do you think would do a better job of handling health care?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job</td></tr>
<tr><td>Q13</td><td>Immigration</td></tr>
<tr><td>Question</td><td>Which party do you think would do a better job of handling immigration?</td></tr>
<tr><td>Value Labels</td><td>-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job</td></tr>
<tr><td>Q14</td><td>Taxes</td></tr>
<tr><td>Question</td><td>Which party do you think would do a better job of handling taxes?</td></tr>
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.857, + 0.9 + ], + "angle": 0, + "content": "
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q15Environment
QuestionWhich party do you think would do a better job of handling the environment?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q16Education
QuestionSome people think the government should provide fewer services even in areas such as health and education in order to reduce spending.\nOther people feel it is important for the government to provide many more services even if it means an increase in spending.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should provide fewer services\n2. Neutral\n3. Government should provide more services
Q17Defense
QuestionSome people believe that we should spend less money for defense.\nOthers feel that defense spending should be increased.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Decrease defense spending\n2. Neutral\n3. Increase defense spending
Q18Health Care
QuestionThere is much concern about the rapid rise in medical and hospital costs.\nSome people feel there should be a government insurance plan which would cover all medical and hospital expenses for everyone.\nOthers feel that all medical expenses should be paid by individuals through private insurance plans like Blue Cross or other company paid plans.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government insurance plan\n2. Neutral\n3. Private insurance plan
Q19Social Welfare
QuestionSome people feel the government in Washington should see to it that every person has a job and a good standard of living.\nOthers think the government should just let each person get ahead on their own.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should see to jobs and standard of living\n2. Neutral\n3. Government should let each person get ahead on own
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.857, + 0.902 + ], + "angle": 0, + "content": "
Q20Aid to Blacks
QuestionSome people feel that the government in Washington should make every effort to improve the social and economic position of blacks.\nOthers feel that the government should not make any special effort to help blacks because they should help themselves.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should help blacks\n2. Neutral\n3. Blacks should help themselves
Q21Environment
QuestionSome people think we need much tougher government regulations on business in order to protect the environment.\nOthers think that current regulations to protect the environment are already too much of a burden on business.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Tougher regulations on business needed to protect environment\n2. Neutral\n3. Regulations to protect environment already too much a burden on business
Q22Abortion
QuestionWould you be pleased, upset, or neither pleased nor upset if the Supreme Court reduced abortion rights?
Value Labels-2. DK/RF\n1. Pleased\n2. Upset\n3. Neither pleased nor upset
Q23Criminal Justice
QuestionDo you favor or oppose the death penalty for persons convicted of murder?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose
Q24US Position in World
QuestionDo you agree or disagree with this statement: ‘This country would be better off if we just stayed home and did not concern ourselves with problems in other parts of the world.’
Value Labels-2. DK/RF\n1. Agree\n2. Disagree
Q25US Position in World
QuestionHow willing should the United States be to use military force to solve international problems?
Value Labels-2. DK/RF\n1. Willing\n2. Moderately willing\n3. Not willing
Q26Inequality
QuestionDo you think the difference in incomes between rich people and poor people in the United States today is larger, smaller, or about the same as it was 20 years ago?
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.857, + 0.899 + ], + "angle": 0, + "content": "
Value Labels-2. DK/RF\n1. Larger\n2. Smaller\n3. About the same
Q27Environment
QuestionDo you think the federal government should be doing more about rising temperatures, should be doing less, or is it currently doing the right amount?
Value Labels-2. DK/RF\n1. Should be doing more\n2. Should be doing less\n3. Is currently doing the right amount
Q28Parental Leave
QuestionDo you favor, oppose, or neither favor nor oppose requiring employers to offer paid leave to parents of new children?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q29LGBTQ+ Rights
QuestionDo you think business owners who provide wedding-related services should be allowed to refuse services to same-sex couples if same-sex marriage violates their religious beliefs, or do you think business owners should be required to provide services regardless of a couple's sexual orientation?
Value Labels-2. DK/RF\n1. Should be allowed to refuse\n2. Should be required to provide services
Q30LGBTQ+ Rights
QuestionShould transgender people - that is, people who identify themselves as the sex or gender different from the one they were born as - have to use the bathrooms of the gender they were born as, or should they be allowed to use the bathrooms of their identified gender?
Value Labels-2. DK/RF\n1. Have to use the bathrooms of the gender they were born as\n2. Be allowed to use the bathrooms of their identified gender
Q31LGBTQ+ Rights
QuestionDo you favor or oppose laws to protect gays and lesbians against job discrimination?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose
Q32LGBTQ+ Rights
QuestionDo you think gay or lesbian couples should be legally permitted to adopt children?
Value Labels-2. DK/RF\n1. Yes\n2. No
Q33LGBTQ+ Rights
QuestionWhich comes closest to your view? You can just tell me the number of your choice.
Value Labels-2. DK/RF 1. Gay and lesbian couples should be allowed to legally marry\n2. Gay and lesbian couples should be allowed to form civil unions but not legally marry\n3. There should be no legal recognition of gay or lesbian couples' relationship
Q34Immigration
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.088, + 0.857, + 0.859 + ], + "angle": 0, + "content": "
QuestionSome people have proposed that the U.S. Constitution should be changed so that the children of unauthorized immigrants do not automatically get citizenship if they are born in this country.\nDo you favor, oppose, or neither favor nor oppose this proposal?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q35Immigration
QuestionWhat should happen to immigrants who were brought to the U.S. illegally as children and have lived here for at least 10 years and graduated high school here? Should they be sent back where they came from, or should they be allowed to live and work in the United States?
Value Labels-2. DK/RF\n1. Should be sent back where they came from\n2. Should be allowed to live and work in the US
Q36Immigration
QuestionDo you favor, oppose, or neither favor nor oppose building a wall on the U.S. border with Mexico?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q37Unrest
QuestionDuring the past few months, would you say that most of the actions taken by protestors to get the things they want have been violent, or have most of these actions by protestors been peaceful, or have these actions been equally violent and peaceful?
Value Labels-2. DK/RF\n1. Mostly violent\n2. Mostly peaceful\n3. Equally violent and peaceful
Q38Government
QuestionDo you think it is better when one party controls both the presidency and Congress, better when control is split between the Democrats and Republicans, or doesn’t it matter?
Value Labels-2. DK/RF\n1. Better when one party controls both\n2. Better when control is split\n3. It doesn’t matter
Q39Government
QuestionWould you say the government is pretty much run by a few big interests looking out for themselves or that it is run for the benefit of all the people?
Value Labels-2. DK/RF\n1. Run by a few big interests\n2. For the benefit of all the people
Q40Government
QuestionDo you think that people in government waste a lot of the money we pay in taxes, waste some of it, or don’t waste very much of it?
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.088, + 0.859, + 0.873 + ], + "angle": 0, + "content": "
Value Labels-2. DK/RF\n1. Waste a lot\n2. Waste some\n3. Don’t waste very much
Q41Election Integrity
QuestionDo you favor, oppose, or neither favor nor oppose allowing convicted felons to vote once they complete their sentence?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q42Democratic Norms
QuestionHow important is it that news organizations are free to criticize political leaders?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q43Democratic Norms
QuestionHow important is it that the executive, legislative, and judicial branches of government keep one another from having too much power?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q44Democratic Norms
QuestionHow important is it that elected officials face serious consequences if they engage in misconduct?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q45Democratic Norms
QuestionHow important is it that people agree on basic facts even if they disagree politically?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q46Democratic Norms
QuestionWould it be helpful, harmful, or neither helpful nor harmful if U.S. presidents could work on the country’s problems without paying attention to what Congress and the courts say?
Value Labels-2. DK/RF\n1. Helpful\n2. Harmful\n3. Neither helpful nor harmful
Q47Democratic Norms
QuestionDo you favor, oppose, or neither favor nor oppose elected officials restricting journalists’ access to information about government decision-making?
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.859, + 0.38 + ], + "angle": 0, + "content": "
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q48Gender Resentment
Question‘Many women interpret innocent remarks or acts as being sexist.’\nDo you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error\n1. Agree\n2. Neither agree nor disagree\n3. Disagree
Q49Gender Resentment
Question‘Women seek to gain power by getting control over men.’\nDo you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error\n1. Agree\n2. Neither agree nor disagree\n3. Disagree
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.402, + 0.521, + 0.417 + ], + "angle": 0, + "content": "D.2 Questionnaire for Breaking News Feedback" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.434, + 0.859, + 0.911 + ], + "angle": 0, + "content": "
Q01Public Cognition (PC)
QuestionI have heard of ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q02Public Cognition (PC)
QuestionMany people around me use ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q03Public Cognition (PC)
QuestionI have a deep understanding of ChatGPT's functions and applications.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q04Perceived Risks (PR)
QuestionChatGPT may lead to the widespread dissemination of false information.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q05Perceived Risks (PR)
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.173, + 0.088, + 0.857, + 0.844 + ], + "angle": 0, + "content": "
QuestionChatGPT may reduce human thinking ability and creativity.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q06Perceived Risks (PR)
QuestionThe development of ChatGPT may replace certain jobs, and I am deeply concerned about this.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q07Perceived Benefits (PB)
QuestionChatGPT will definitely improve my work and study efficiency.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q08Perceived Benefits (PB)
QuestionChatGPT helps broaden my knowledge and provides me with new perspectives and ideas.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q09Perceived Benefits (PB)
QuestionChatGPT promotes technological innovation and development in related fields.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q10Trust (TR)
QuestionI fully trust the team developing ChatGPT to manage and guide its development responsibly.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q11Trust (TR)
QuestionI have strong confidence in the accuracy and reliability of the information generated by ChatGPT.
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.173, + 0.088, + 0.859, + 0.895 + ], + "angle": 0, + "content": "
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q12Trust (TR)
QuestionI believe that the future application of ChatGPT will be effectively regulated.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q13Fairness (FA)
QuestionThe opportunities to use ChatGPT are distributed fairly among different groups of people.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q14Fairness (FA)
QuestionI find the distribution of benefits brought by ChatGPT to be fair.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q15Fairness (FA)
QuestionI believe that the decision-making process for the development and promotion of ChatGPT is fully transparent and adequately reflects public interests.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q16Public Acceptance (PA)
QuestionOverall, I strongly welcome the emergence of ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q17Public Acceptance (PA)
QuestionI am definitely willing to use ChatGPT in my work or studies.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q18Public Acceptance (PA)
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.089, + 0.856, + 0.2 + ], + "angle": 0, + "content": "
QuestionI strongly support increased investment in the research and development of AI technologies like ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.219, + 0.53, + 0.235 + ], + "angle": 0, + "content": "D.3 Questionnaire for National Economic Survey" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.251, + 0.859, + 0.888 + ], + "angle": 0, + "content": "
Q01Food
QuestionWhat is your average monthly expenditure on food (including dining out)? (Unit: CNY)
Value LabelsA. Below 500 CNY\nB. 501-650 CNY\nC. 651-800 CNY\nD. 801-1000 CNY\nE. Above 1000 CNY
Q02Food
QuestionDo you think your current spending on food, tobacco, and alcohol is too high relative to your income?
Value LabelsA. Yes\nB. No\nC. Acceptable
Q03Clothing
QuestionWhat is your average monthly expenditure on clothing (including apparel, shoes, and accessories)? (Unit: CNY)
Value LabelsA. Below 50 CNY\nB. 51-100 CNY\nC. 101-150 CNY\nD. 151-200 CNY\nE. Above 200 CNY
Q04Clothing
QuestionHow much economic pressure do you feel from clothing expenses?
Value LabelsA. Very low, almost no pressure\nB. Moderate, some pressure but manageable\nC. High, requires careful spending\nD. Very high, affects spending in other areas
Q05Household
QuestionWhat is your average monthly housing expenditure? (Including rent, mortgage, property fees, maintenance, etc.) (Unit: CNY)
Value LabelsA. Below 200 CNY\nB. 201-500 CNY\nC. 501-800 CNY\nD. 801-1200 CNY\nE. Above 1200 CNY
Q06Household
QuestionWhat percentage of your monthly income is spent on housing? (Including rent, mortgage, property fees, maintenance, etc.)
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.857, + 0.906 + ], + "angle": 0, + "content": "
Value LabelsA. Below 10% \nB. 10%-20% \nC. 21%-30% \nD. 31%-40% \nE. Above 40%
Q07Daily Service
QuestionWhat is your average monthly expenditure on daily necessities (personal care, household items, cleaning supplies, etc.) and services (housekeeping, repairs, beauty, pet services, etc.)? (Unit: CNY)
Value LabelsA. Below 80 CNY \nB. 81-120 CNY \nC. 121-160 CNY \nD. 161-200 CNY \nE. Above 200 CNY
Q08Transportation & Communication
QuestionWhat is your average monthly expenditure on transportation (public transport, taxis, fuel, parking, etc.) and communication (mobile and internet fees)? (Unit: CNY)
Value LabelsA. Below 200 CNY \nB. 201-300 CNY \nC. 301-400 CNY \nD. 401-500 CNY \nE. Above 500 CNY
Q09Education & Entertainment
QuestionWhat is your average monthly expenditure on education (tuition, training, books, etc.) and cultural entertainment (movies, performances, games, fitness, cultural activities, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY \nB. 101-200 CNY \nC. 201-300 CNY \nD. 301-400 CNY \nE. Above 400 CNY
Q10Education & Entertainment
QuestionCan you easily afford your current education, cultural, and entertainment expenses?
Value LabelsA. Yes, spending does not affect other areas \nB. Barely, needs some control \nC. Not really, affects other expenditures \nD. No, it creates significant financial pressure
Q11Medical
QuestionWhat is your average monthly expenditure on healthcare (medications, medical services, health management, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY \nB. 101-200 CNY \nC. 201-300 CNY \nD. 301-400 CNY \nE. Above 400 CNY
Q12Medical
QuestionHave you purchased private medical or health insurance for yourself or your family?
Value LabelsA. Yes \nB. Not yet, but planning to \nC. No, and no plans to
Q13Others
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.088, + 0.857, + 0.679 + ], + "angle": 0, + "content": "
QuestionBesides food, clothing, housing, daily necessities and services, transportation, education, culture, and healthcare, what is your average monthly expenditure on other areas (e.g., hobbies, charitable donations, investment, etc.)? (Unit: CNY)
Value LabelsA. Below 30 CNY\nB. 31-60 CNY\nC. 61-90 CNY\nD. 91-120 CNY\nE. Above 120 CNY
Q14Overall
QuestionHow would you evaluate the impact of your current consumption level on your household (or personal) financial situation?
Value LabelsA. Comfortable, can moderately increase spending\nB. Average, can maintain current spending\nC. Tight, need to control or reduce spending\nD. Very tight, affects quality of life
Q15Overall
QuestionDo you feel that your consumption pressure is too high relative to your income level?
Value LabelsA. Yes\nB. No\nC. Not sure
Q16Overall
QuestionIf your income increases, which consumption areas would you most like to expand or improve? (Multiple choices allowed)
Value LabelsA. Food and alcohol\nB. Clothing\nC. Housing\nD. Daily necessities and services\nE. Transportation and communication\nF. Education, culture, and entertainment\nG. Healthcare\nH. Other goods and services
Q17Overall
QuestionWhat is your consumption expectation for the next six months to a year?
Value LabelsA. Will continue to increase\nB. Will remain roughly the same\nC. Will moderately decrease\nD. Uncertain
" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_origin.pdf b/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..75221105daca762ffa04ff39d5cea5b07a800c19 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/9641b7f1-5ad9-4487-b9c3-7734cccafedc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b7b7f409e0a78f0657ee861a2f78358e77b7c82d6ec7e285cb113b296b0d535 +size 1983998 diff --git a/data/2025/2504_10xxx/2504.10157/full.md b/data/2025/2504_10xxx/2504.10157/full.md new file mode 100644 index 0000000000000000000000000000000000000000..027abf7389329d6dbfe6ab428c7704d269a8f0f3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/full.md @@ -0,0 +1,684 @@ +# SocioVerse: A World Model for Social Simulation Powered by LLM Agents and A Pool of 10 Million Real-World Users + +Xinnong Zhang $^{1,2\dagger}$ , Jiayu Lin $^{1,2\dagger}$ , Xinyi Mou $^{2\dagger}$ , Shiyue Yang $^{2}$ , Xiawei Liu $^{2}$ , Libo Sun $^{2}$ , Hanjia Lyu $^{3}$ , Yihang Yang $^{2}$ , Weihong Qi $^{4}$ , Yue Chen $^{2}$ , Guanying Li $^{2}$ , Ling Yan $^{5}$ , Yao Hu $^{5}$ , Siming Chen $^{2}$ , Yu Wang $^{2}$ , Xuanjing Huang $^{2}$ , Jiebo Luo $^{3}$ , Shiping Tang $^{2}$ , Libo Wu $^{1,2}$ , Baohua Zhou $^{2}$ , Zhongyu Wei $^{1,2}$ + +$^{1}$ Shanghai Innovation Institute, $^{2}$ Fudan University, $^{3}$ University of Rochester, $^{4}$ Indiana University, $^{5}$ Xiaohongshu Inc. zywei@fudan.edu.cn +SocioVerse: https://github.com/FudanDISC/SocioVerse + +![](images/ee37a034cc7bfd54903875bca0d8ebdb66151851f1d8c1aae3fe11feeefe474d.jpg) +Figure 1: An illustration of the SocioVerse in the case of Ukraine issue. The alignment challenges are well handled regarding environment, user, scenario, and behavior. + +# Abstract + +Social simulation is transforming traditional social science research by modeling human behavior through interactions between virtual individuals and their environments. With recent advances in large language models (LLMs), this approach has shown growing potential in capturing individual differences and predicting group behaviors. However, existing methods face alignment challenges related to the environment, target users, interaction mechanisms, and behavioral patterns. To this end, we introduce SocioVerse, an LLM-agent-driven world model for social simulation. Our framework features four powerful alignment components and a user pool of 10 million real individuals. To validate its effectiveness, we conducted large-scale simulation experiments across three distinct domains: politics, news, and economics. Results demonstrate that SocioVerse can reflect large-scale population dynamics while ensuring diversity, credibility, and representativeness through standardized procedures and minimal manual adjustments. + +# 1 Introduction + +The study of human behavior aims to understand how individuals and groups act in various social contexts and serves as a cornerstone of social science research. Traditionally, this has been accomplished using methods such as surveys, interviews, and observations [10, 18, 44]. However, these approaches often encounter challenges, including high costs, limited sample sizes, and ethical concerns. 
As a result, researchers have resorted to alternative methods for studying human behavior. + +Social simulation has emerged as an effective method for addressing this issue, where researchers use agents to model human behavior, observe their reactions, and translate these findings into insights about human behavior [48, 50]. By assigning behavioral rules to autonomous agents, researchers can explore how micro-level decisions lead to emergent macro-level patterns through the agent-based models [11, 21]. This approach enables capturing specific groups' preferences on particular topics and forecasting potential social dynamics. Furthermore, recent advancements in large language models (LLMs) have significantly enhanced agents' reasoning and decision-making capabilities, enabling them to operate and interact within increasingly realistic and complex environments [3, 35, 37]. + +Recent studies have explored social simulation across various levels and scenarios, from mimicking well-known individuals and mirroring specific situations to modeling large-scale social dynamics [4, 29, 34, 36, 49, 60]. However, they share a common challenge: alignment between the simulated environment and the real world, which manifests across multiple dimensions and raises several key questions that remain to be addressed, as shown in Figure 1. + +# Q1. How to align the simulated environment with the real world? + +In the real world, new events occur every day and new content is continuously generated. The behavior of real users is rooted in these ever-evolving social contexts and policy agendas. However, the static knowledge of LLMs prevents them from aligning with the dynamic nature of the real-world social environment [2, 15]. There is a gap between the simulated context and the real world, which results in discrepancies between the simulation process and outcomes compared to those in reality. Therefore, it is necessary to establish an update mechanism to keep the simulated environment synchronized with the real world. + +# Q2. How to align simulated agents with target users precisely? + +The composition of users in the real world is both complex and diverse, making it impractical to enumerate all users in every scenario. Therefore, it is essential to identify target users whose distribution aligns with that of the users in the corresponding scenario, thereby accurately reflecting the real-world composition and relationships [17, 45]. Based on this, precise target user simulation also requires providing agents with a detailed and comprehensive description of the corresponding users, often involving the integration of high-fidelity demographic, contextual, and behavioral data. + +# Q3. How to align the interaction mechanism with the real world among different scenarios? + +The diversity of social interactions presents challenges in social simulation design, requiring deliberate choices regarding the number of individuals, social structures, interaction patterns, and message dissemination mechanisms, to align with the real world. This often results in independently constructed task-specific simulation pipelines performing repetitive work, which reduces their generalizability and scalability [26, 58]. Therefore, there is a need for unified simulation frameworks based on systematic categorization to standardize simulation components and facilitate extensibility across different social scenarios. + +# Q4. How to align the behavioral pattern with the real-world groups? 
+ +When the environment perceived by agents, the user composition, and the interaction mechanisms are aligned with the real world, agents are expected to exhibit responses consistent with those of the corresponding real users. However, current LLMs exhibit inherent bias and limitations in such reasoning, failing to infer different types of user behaviors [16, 60]. Therefore, it is necessary to systematically collect behavior-driving factors across different user characteristics and adopt appropriate modeling approaches to effectively capture diverse behavior patterns. + +In this paper, we propose SocioVerse, a world model for social simulation driven by LLM-based agents based on a large-scale real-world user pool. As shown in Figure 2, we design modular components to address the above questions. The Social Environment injects up-to-date and external real-world information into the simulation. The User Engine and Scenario Engine respectively reconstruct realistic user context and orchestrate the simulation process to align the simulation with + +![](images/af88ad03ac5ce1c47c53dd172206932141236f13f7154e620648f8b4af53663e.jpg) +Figure 2: An illustration of SocioVerse framework involving 4 powerful parts. The social environment provides an updated context for the simulation. During the simulation, the behavior engine takes the simulation setting, user profiles, and social information from the scenario engine, user engine, and social environment, respectively, and generates the results according to the query. + +the real world. Given this rich contextual setup, the Behavior Engine then drives agents to reproduce human behaviors accordingly. + +To support the framework, we construct a user pool of 10 million individuals by collecting real-world social media data to power the user engine. Comparable in scale to the entire populations of Hungary or Greece, this extensive pool enables diverse and large-scale social simulations. For any customized simulation task, various sampling strategies can be applied to extract target user groups from the pool to support the simulation process. + +We conduct three simulations using the SocioVerse framework, each differing in research domain, user composition, and social environment: (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey. For each task, we compare the simulation results with real-world situations. Extensive and comprehensive experiments demonstrate that our framework serves as a robust foundation for building standardized and accurate large-scale social simulations. In summary, our key contributions are as follows: + +- SocioVerse: We propose a world model for social simulation comprising four powerful alignment modules, enabling diverse and trustworthy social simulations (as illustrated in Figure 2). +- 10M User Pool: A user pool of 10 million individuals, constructed from real-world behavioral data, enables large-scale and diverse social simulations, ranging from small interest groups to large citizen communities. +- Three Illustrative Simulations: We demonstrate the framework's capabilities through three distinct scenarios: presidential election prediction, breaking news feedback, and a national economic survey, providing a foundation for future research. + +# 2 Methods + +# Overall Framework + +The SocioVerse framework follows a structured pipeline to achieve realistic social simulation results, as shown in Figure 2: (1) Social Environment collects updated information and contextual knowledge. 
Within the simulation environment, (2) User Engine aligns the simulated agents with target users, (3) Scenario Engine aligns the interaction structure with diverse scenarios, and (4) Behavior Engine aligns the behavioral pattern with real-world target groups. + +# 2.1 Social Environment + +Function The social environment provides event-related context to align the simulation environment with real-world conditions. By integrating up-to-date events, social statistics, and preference content into LLM-based agents, it enhances the realism of the simulation and improve agent decision-making. + +Components The social environment should encompass as much real-world social, cultural, and technological context as possible. It can be broadly categorized into three types: social structural information, social dynamic information, and personalized context. + +Social Structure: Social structural information provides agents with a rich knowledge base encompassing demographic distributions, cultural norms, urban infrastructures, and collective behavior patterns [57]. This data allows agents to behave in a way that aligns with the typical characteristics of their assigned demographic or geographic profile. For example, by incorporating regional dialect preferences, work-life habits, and common social values, the simulation can more accurately reflect public discourse trends, mobility behaviors, and economic interactions. + +Social Dynamics: Social dynamics encompass time-sensitive content continuously generated in the real world, such as news events and policy changes. Typically, this engine maintains an up-to-date event base to continuously collect real-world event news from mainstream news, and all the news articles contain time stamps and event-related tags so that LLM-based agents can comb through the timeline of the events and react accordingly [37]. + +Personalized Context: In addition to the macro social environment, individuals also receive different personalized information feeds. Previous studies have explored that the recommendation system can enhance the behavior diversity of the agent [31, 56, 60]. Consequently, the preference content component constructs relevant posts and pushes them to agents according to their social interaction network and interesting topics. + +# 2.2 User Engine + +Function The user engine aligns simulated agents with a rich set of real-world user samples, enabling the creation of complex target users within the simulation. + +Components To support diverse user composition and effective user retrieval and description, the user engine incorporates a large user pool and a wide range of user labels. + +User Pools: The user pool is designed to collect extensive digital footprints of individuals across social media platforms, enabling a more comprehensive characterization of real-world behavioral patterns and expression tendencies. To this end, we constructed a user pool covering a variety of social media platforms, including $\mathbf{X}^1$ and Rednote2. Anomalous data, such as advertising and bot-generated content, is filtered by calculating the post frequency and average text similarity. The detailed procedure can be found in Appendix A. We index users and construct a user pool of 10 million users based on the collected social media posts. 
Formally, we define the user pool as $UserPool = \{(U_i, P_i) \mid i = 1, \ldots, N\}$, where $N$ is the number of users in the pool, the $i$-th user $U_i$ is drawn from the collection of social media platforms $\mathbb{S}$, and $P_i = \{P_{i,1}, P_{i,2}, \ldots\}$ denotes his/her related posts. The statistical summary of the user pool is provided in Table 1.
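For illustration only, one way to represent such a $(U_i, P_i)$ record and the anomalous-account filter in code is sketched below; the field names and the cut-off values are hypothetical, not the authors' implementation.

```python
from dataclasses import dataclass, field

@dataclass
class UserRecord:
    """One (U_i, P_i) entry of the user pool; field names are hypothetical."""
    user_id: str
    platform: str                                  # e.g., "X" or "Rednote"
    posts: list[str] = field(default_factory=list)

def looks_anomalous(posts_per_day: float, avg_text_similarity: float) -> bool:
    # The paper filters advertising/bot accounts by post frequency and average
    # text similarity; the threshold values below are illustrative only.
    return posts_per_day > 50 or avg_text_similarity > 0.9
```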
| Source  | # Users   | # Posts    |
| ------- | --------- | ---------- |
| X       | 1,006,517 | 30,195,510 |
| Rednote | 9,158,404 | 40,963,735 |
+ +Table 1: Statistical summary of the 10M user pool. + +User Labels: User labels refer to the tagging and description of users, which can be represented using discrete attributes or continuous representation. Demographic descriptions of users are the most commonly used form of labeling. However, they are often not directly accessible. Therefore, we designed a demographic annotation system to infer and label user attributes. The process begins with multiple LLMs serving as initial annotators, classifying users across various demographic dimensions. Human annotators then evaluate and refine the LLM-generated labels, ensuring the reliability of the user tags dataset. The curated dataset is subsequently used to train demographic classifiers, enabling large-scale annotation in a cost-effective manner. Specifically, we annotate users across 15 demographic dimensions: age, gender, vocation, race, income, education, settlement type, region, employment, marital status, religious, party, ideology, BigFive personality, and hobbies. Each attribute is inferred by a specialized classifier trained on the corresponding subset of the user tags dataset. See Appendix B for further details. + +# 2.3 Scenario Engine + +Function The scenario engine aligns various simulation structures with real-world contexts based on specific task formulations and scenario types, and then scales individual simulations by sampling according to demographic distributions provided by the user engine. + +Components The scenario engine formulates a wide range of real-world social situations, which can be summarized as archetypal scenario templates, including questionnaires, in-depth interviews, behavior experiments, and social media interaction. + +Questionnaire: The questionnaire scenario constructs the simulation in a 1-to-N manner, with one designed scale or questionnaire answered by multiple target users in a single round. This scenario is suitable for massive social investigation on specific topics, like election polls. + +Inddepth Interview: The in-depth interview scenario follows a 1-to-1 structure, where a simulated interviewer engages with an individual target user through multiple interaction rounds [43]. This iterative process allows for probing deeper into responses, clarifying ambiguities, and exploring underlying motivations. Such simulations are particularly useful for qualitative research on user experiences, psychological assessments, and exploratory studies where nuanced responses and detailed reasoning are essential. + +Behavior Experiment: The behavior experiment scenario is typically conducted in a 1-to-N or N-to-N format, depending on whether individual or group interactions are being studied [8, 42]. Simulated users are exposed to controlled conditions where their behavioral responses are observed across multiple rounds of interaction. These simulations help researchers examine decision-making processes, social influences, and cognitive biases in various experimental setups, such as consumer behavior studies or cooperative game simulations. + +Social Media Interaction: The social media interaction scenario adopts an N-to-N structure, where multiple simulated users engage in dynamic, multi-round exchanges in an online setting [30]. This scenario captures real-time interactions, including content sharing, comment threads, and viral spread dynamics, allowing researchers to analyze public discourse, opinion shifts, and information diffusion on social platforms. 
It is particularly valuable for studying trends in misinformation, political discussions, and network-based influence propagation. + +# 2.4 Behavior Engine + +Function The behavior engine aims to align the behaviors of the agents with that of real users. The behavior engine integrates user history and experience from the user engine, the interaction mechanism from the scenario engine and social context from the social environment to predict the behavior of each individual. + +Components To achieve credible behavior simulation, the behavior engine needs to provide a robust simulation foundation, including traditional agent-based models and a series of LLMs. + +Traditional Agent-Based Modeling: Traditional agent-based modeling (ABM) relies on rule-based and mathematical models [9, 23, 32, 47, 52], where interactions among agents are typically realized through the broadcasting of predefined values. These values are derived from heuristic functions or theoretical mathematical formulations. Traditional ABM approaches are highly scalable and computationally efficient, making them well-suited for simulating large populations, especially marginal users with relatively limited influence. + +LLM-powered Agents: LLMs leverage their role-playing capabilities to simulate user-generated content, and the abilities can be activated through various methods [29, 36, 51, 61-64]. Specifically, the behavior engine can be powered by general LLMs, expert LLMs, and domain-specific LLMs. Through non-parametric prompting, powerful general LLMs (e.g., GPT series and Qwen series) can act in accordance with predefined user profiles. Expert and domain-specific LLMs are acquired through parametric training, e.g., continual pretraining, supervised fine-tuning, and reinforcement learning. When target users exhibit complex profiles and the simulation requires deep domain expertise, these models are leveraged to enhance the professionalism and accuracy of agent behaviors. + +# 3 Implementation of Specific Scenarios + +We implement three representative social simulation scenarios through the SocioVerse framework based on the implemented components: (a) presidential election prediction of America, (b) breaking news feedback analysis, and (c) national economic survey of China. These scenarios respectively address political communication, journalistic dissemination, and socioeconomic domains, demonstrating the framework's generalizability through standardized implementation pipelines. + +![](images/41b8f7e83330617a874286474fbb177b1c17d9e2e45f3b39659735bbc7457011.jpg) +Figure 3: Illustration of three scenarios representing (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey. + +![](images/9839e0e42e838abe4b61415c929efb4951f849533401bc247dd8cf0d2900ba80.jpg) + +![](images/5cd70d2f9aa944b30cd8014a3f27307a2828ab5935fadc5daf426be35f3eedea.jpg) + +# 3.1 Presidential Election Prediction of America + +Task Description Presidential elections remain central to public engagement and party strategy formation [6, 46]. This study analyzes methods for large-scale election simulation using LLMs through the U.S. presidential system's Electoral College framework. In this indirect voting system, citizens vote for state electors (allocated by congressional representation) who formally elect the president. Most states employ a winner-takes-all allocation of electoral votes to the statewide majority winner, with our modeling focused on predicting these state-level outcomes. 
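To make the state-level aggregation concrete, the minimal sketch below (ours, not the authors' implementation; function and variable names are hypothetical) rolls simulated individual votes up into winner-takes-all electoral outcomes. It deliberately ignores the split allocation used by Maine and Nebraska.

```python
from collections import Counter

def electoral_college(votes_by_state: dict[str, list[str]],
                      electors_by_state: dict[str, int]) -> Counter:
    """Aggregate simulated agents' votes into electoral-vote totals.

    votes_by_state maps a state to the candidate chosen by each simulated agent;
    electors_by_state maps a state to its number of electoral votes.
    Simplification: every state is treated as winner-takes-all.
    """
    totals: Counter = Counter()
    for state, ballots in votes_by_state.items():
        winner, _ = Counter(ballots).most_common(1)[0]  # statewide plurality winner
        totals[winner] += electors_by_state[state]      # all electors go to the winner
    return totals
```

State-level accuracy and RMSE against the official outcome can then be computed from the per-state winners and vote shares.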
+ +Target Group Distribution Extensive research has documented the influence of demographic factors on election outcomes [33, 53]. We model U.S. demographic and ideological diversity through integrated Census Bureau (2022 voting/registration) and ANES (2020) data [1]. This scenario incorporates 12 attributes from the user engine: socioeconomic (income, education, employment), geographic (region, area), and political (party, ideology) dimensions alongside demographic factors (age, gender, race, marital status, and religious status). Given available marginal distributions, we employ iterative proportional fitting (IPF) to synthesize agent populations, see Appendix C.1. + +Questionnaire Design We design the presidential election questionnaire based on abundant polls conducted by various media and research institutes [5, 24], incorporating both significant issues and + +voter preferences. These elements are then optimized into proper forms for LLM-based agents by the scenario engine. The entire questionnaire can be found in Appendix D.1. + +Evaluation Metric Two metrics are used to comprehensively compare the simulated election results to the real-world results. (1) Accuracy rate (Acc) is measured by calculating the proportion of states for which the election simulation results align with the actual result, serving as a coarse-grained evaluation metric. (2) Root Mean Square Error (RMSE) is measured by calculating the simulated vote share and the actual vote share for each state, which serves as a fine-grained evaluation metric. + +# 3.2 Breaking News Feedback + +Task Description Journalism plays a crucial role in shaping public perception and opinion through agenda-setting, framing, and information dissemination [20, 55]. Online social media platforms have gradually replaced the influence of traditional paper media. When breaking news is released on social media platforms, its potential audience may hold different stances. We take the release of ChatGPT as our target news to evaluate the accuracy and foreseeability of public attitudes. + +Target Group Distribution We define all Rednote users in our pool as the universal set, identifying technology-interested users as the potential audience set $\mathbb{P}$ , and those discussing ChatGPT via keyword matching as the ground truth set $\mathbb{G}$ , with $\mathbb{G} \subset \mathbb{P} \subset UserPool$ . Context is limited to pre-news timeframes to prevent leakage. Using the potential audience distribution as prior, we sample agents with identical distribution sampling (IDS) as $D_{s} = IDS(UserPool, \mathbb{P})$ , see Appendix C.2), considering demographics (gender, age, education, and consumption level) during sampling the user pools. Based on this, the task is to compare the consistency between the agents' attitudes toward news and those of the users in the ground truth set. + +Questionnaire Design We design the cognitive questionnaire using the ABC attitude model (Affect, Behavior, Cognition) [28], which outlines attitude formation as a hierarchy: cognition affects emotions, guiding behavior. Combined with a 5-point Likert scale [22], the questionnaire covers six dimensions: public cognition (PC), perceived risks (PR), perceived benefits (PB), trust (TR), fairness (FA), and public acceptance (PA). See Appendix D.2 for details. + +Evaluation Metric Agents from both sets answer the questionnaire for paired responses. 
Two evaluation dimensions assess feedback: (1) Normalized RMSE (NRMSE) measures point-wise differences between simulated and ground truth answers across PC, PR, PB, TR, FA, and PA as value evaluations; (2) KL-divergence (KL-Div) compares the 6-dimensional answer distributions between groups as consistency evaluations. + +# 3.3 National Economic Survey of China + +Task Description Economic simulation is another crucial part of massive social simulations as it models resource distribution, market dynamics, and financial behaviors, providing insights into economic stability and policy impacts [13, 54]. By integrating economic factors with social interactions, it enhances the prediction of systemic outcomes, guiding decision-making in areas such as governance, urban planning, and crisis management. We follow a national economic survey conducted by the National Bureau of Statistics of China, which interviews Chinese citizens on their monthly spending given the average salary of each province in China. + +Target Group Distribution The prior distribution is based on the methodology from the National Bureau of Statistics of China, which takes 160,000 families nationwide and calculates their incomes and spending as the national average statistics [39]. We sample nationwide agents from our user pool proportionally according to their region population and generate their income distribution according to the regional average income [38]. The detailed method can be referred to in Appendix C.3. + +Questionnaire Design Spending details in China Statistical Yearbook 2024 [40] are categorized into eight parts, i.e. food, clothing, housing, daily necessities & services, communication & transportation, education & entertainment, healthcare, and others. Consequently, the questionnaire design covers the above categories with examples and uses segmented interval options in each question. The entire questionnaire can be referred to in Appendix D.3. + +Evaluation Metric Both value evaluation and distribution evaluation are involved in the national economic survey as well. (1) NRMSE of the nine categories is measured between the simulated results and official statistics. (2) KL-Div is measured by taking the 8-item spending as a distribution to evaluate the consistency between the simulation and the real world. + +# 4 Results + +# 4.1 SocioVerse Can Support Diverse and Accurate Massive Social Simulations + +
| Scenario         | # Agents | # Demographics | Type         | Sampling | Source  | Language | # Questions | Ground truth |
| ---------------- | -------- | -------------- | ------------ | -------- | ------- | -------- | ----------- | ------------ |
| PresElectPredict | 331,836  | 12             | label        | IPF      | X       | EN       | 49          | real world   |
| BreakNewsFeed    | 20,000   | 7              | label        | IDS      | Rednote | ZH       | 18          | calculated   |
| NatEconSurvey    | 16,000   | 9              | label+number | IDS      | Rednote | ZH       | 17          | real world   |
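The Sampling column above refers to iterative proportional fitting (IPF) and identical distribution sampling (IDS), detailed in Appendix C. As a rough illustration of IPF only, the generic two-attribute raking sketch below fits a joint table to assumed marginal counts; it is ours and not the procedure of Appendix C.1, and the marginals in the example are made up.

```python
import numpy as np

def ipf_2d(seed: np.ndarray, row_marginals: np.ndarray,
           col_marginals: np.ndarray, n_iter: int = 100) -> np.ndarray:
    """Generic two-attribute iterative proportional fitting (raking).

    seed: initial joint table of shape (R, C), e.g., all ones
    row_marginals, col_marginals: target totals for the two attributes
    """
    table = seed.astype(float).copy()
    for _ in range(n_iter):
        table *= (row_marginals / table.sum(axis=1))[:, None]  # match row totals
        table *= (col_marginals / table.sum(axis=0))[None, :]  # match column totals
    return table

# Toy example with assumed marginals: 3 age groups x 4 regions.
joint = ipf_2d(np.ones((3, 4)),
               row_marginals=np.array([50.0, 30.0, 20.0]),
               col_marginals=np.array([25.0, 25.0, 25.0, 25.0]))
```

In the actual pipeline, IPF is applied over the available demographic attributes so that the synthesized agent population matches the reported Census and ANES marginal distributions.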
+ +Experiment Settings We select powerful LLMs from different model families. For open-sourced models, we select Llama-3-70b-Instruct [14], Qwen2.5-72b-Instruct [59], DeepSeek-R1-671b [19], and DeepSeek-V3 [27]. For commercial models, we select GPT-4o $^3$ [41] and GPT-4o-mini $^4$ . + +We compare the settings of all three scenarios for better understanding, which is shown in Table 2. As the Presidential Election Prediction covers a 1-in-1,000 sample of the U.S. population, GPT-4o is excluded from comparison due to cost constraints. In terms of local model serving, Qwen2.5-72b-Instruct and Llama3-70b-Instruct models are both deployed on 8 NVIDIA RTX4090 GPUs via vLLM [25]. We set max tokens to 2,048 for all models to enable chain-of-thoughts during the generation and the temperature is set to 0.7 to encourage diversity. Implementation details for user pool construction and demographics annotation can be found in Appendix A and B. + +Table 2: Detail settings of three simulation scenarios, where PresElectPredict, BreakNewsFeed, and NatEconSurvey denote three simulations mentioned in the paper, respectively. IPF and IDS denote iterative proportional fitting and identical distribution sampling, see Appendix C. + +
| Model | PresElect Acc↑ (Overall) | PresElect RMSE↓ (Overall) | PresElect Acc↑ (Battleground) | PresElect RMSE↓ (Battleground) | BreakNews KL-Div↓ | BreakNews RMSE↓ | NatEcon KL-Div↓ (Overall) | NatEcon RMSE↓ (Overall) | NatEcon KL-Div↓ (Developed-region) | NatEcon RMSE↓ (Developed-region) |
| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
| Llama3-70b | 0.843 | 0.064 | 0.733 | 0.045 | 0.668 | 0.199 | 0.016 | 0.026 | 0.013 | 0.025 |
| Qwen2.5-72b | 0.922 | 0.037 | 0.800 | 0.031 | 0.113 | 0.059 | 0.066 | 0.048 | 0.043 | 0.039 |
| DeepSeek-R1-671b | - | - | 0.670 | 0.065 | 0.383 | 0.082 | 0.059 | 0.045 | 0.045 | 0.036 |
| DeepSeek-V3 | 0.922 | 0.046 | 0.867 | 0.041 | 0.263 | 0.072 | 0.035 | 0.036 | 0.023 | 0.030 |
| GPT-4o-mini | - | - | 0.800 | 0.039 | 0.195 | 0.114 | 0.046 | 0.045 | 0.030 | 0.036 |
| GPT-4o | - | - | - | - | 0.196 | 0.055 | 0.062 | 0.051 | 0.036 | 0.038 |
+ +Table 3: Overall results of the three scenarios, where subset Battleground indicates battleground states in the U.S. in the presidential election and subset Developed-Region indicates top-10 developed regions in China in terms of GDP. + +Results The overall simulation results of the three scenarios are shown in Table 3. We also report subset results for presidential election prediction and national economic survey. + +- Presidential Election Prediction We report the overall results and the battleground states' results separately. The prediction of battleground states is challenging even in the real world and thus becomes the focus during the election process. According to the results, GPT-4o-mini and Qwen2.5-72b show competitive performance both in Acc and RMSE. Typically, according to the winner-takes-all rule, over $90\%$ state voting results are predicted correctly, which means the simulation achieves a high-precision macroscopic reduction of the real-world election results. After the case study, we find that DeepSeek-R1-671b sometimes falls into overthinking, resulting in less accurate results. + +- Breaking News Feedback The results measure the overall consistency of each model compared with the real-world users' reactions and attitudes. To this end, the performances of GPT-4o and Qwen2.5-72b are more aligned with real-world perspectives than other models in terms of KL-Div and NRMSE, respectively, and the following detailed analysis will demonstrate that the models consistently capture and accurately predict public trends and opinions. +- National Economic Survey We report the overall results and results for the top 10 regions by GDP (i.e., developed regions) separately. Generally, all the models closely align with real-world statistics. Llama3-70b shows a significant superiority over other models in the economic survey scenario and all the models perform better in the 1st-Region subset than overall. The results demonstrate that individuals' spending habits can be accurately reproduced under the SocioVerse framework, especially in developed regions. + +The overall results from both value evaluation and distribution evaluation of three simulations sufficiently prove that SocioVerse can support diverse and accurate massive social simulations with a standard pipeline and minimal changes with human experts in the loop. However, the choice of underlying LLMs can affect simulation precision across different scenarios, highlighting the need for further study. + +# 4.2 Prior Distribution and Real-World Knowledge Can Enhance Simulation Accuracy in Presidential Election Predictions + +
| Model | Acc↑ | RMSE↓ |
| ----- | ---- | ----- |
| Llama3-70b | 0.733 | 0.045 |
| - w/o Knowledge | 0.533 | 0.051 |
| - w/o Knowledge & Prior Distribution | 0.600 | 0.386 |
| Qwen2.5-72b | 0.800 | 0.031 |
| - w/o Knowledge | 0.800 | 0.033 |
| - w/o Knowledge & Prior Distribution | 0.600 | 0.370 |
| GPT-4o-mini | 0.800 | 0.039 |
| - w/o Knowledge | 0.800 | 0.052 |
| - w/o Knowledge & Prior Distribution | 0.667 | 0.323 |
+ +Table 4: Ablation experiment results on the presidential election prediction simulation, where -w/o Knowledge denotes without real-world user knowledge and -w/o Piror Distribution denotes using random demographics distribution. + +We conduct an ablation study on the presidential election prediction simulation to assess the impact of prior demographics distribution and real-world user knowledge. As shown in Table 4, prior demographics distribution significantly improves the accuracy of the simulation in both Acc and RMSE compared to random demographics distribution. Additionally, past posts from users on social media platforms improve the fine-grained performance, especially for Llama3-70b in Acc and all the models in RMSE. We can tell from the ablation study that both prior distribution and real-world knowledge in the SocioVerse pipeline are significant during the simulation. + +# 4.3 Group Preference and Perspectives Can Be Well Reflected in Breaking News Feedback + +During the Breaking News Feedback simulation, the core concern is whether the preferences and perspectives of the target group are well captured and reflected in the results. We reformulate the original questionnaire into the Likert 6-dimension scale ranging from 1 to 5 points, representing from totally disagree to totally agree. As the ground truth of the simulation is calculated by prompting LLM agents from the ground truth set, the simulated and real results are paired for each model, as shown in Figure 4. All the models powered by the potential audience set during the simulation tend to behave consistently with the ground truth users. However, Llama3-70b perform poorly with a larger gap between the simulated and real results than other models. GPT-4o-mini shows different attitudes in the fairness (FA) and public acceptance (PA) dimensions, which may be because the news is related to OpenAI. Another trend indicates that, generally, all the models perform more + +![](images/f30fe370c7d43ea99207a6f091595cf5559b34c28ba881f238d60524e7804d01.jpg) +Figure 4: An illustration of the performances of the breaking news feedback simulation, where PC, PR, PB, TR, FA, and PA denote six dimensions from the Likert scale (see §3.2 questionnaire design), with 1-point standing for totally disagree and 5-point for totally agree. + +disagreeably in the simulated results than the real results, which also underlines the potential risk of biases during the public opinion simulation. + +# 4.4 The Capabilities of LLMs Vary in Different Domains in National Economic Survey + +
| Item | Llama3-70b | Qwen2.5-72b | GPT-4o-mini | GPT-4o | DeepSeek-R1 |
| --- | --- | --- | --- | --- | --- |
| Daily | 0.007 | 0.009 | 0.006 | 0.010 | 0.009 |
| Clothing | 0.012 | 0.015 | 0.019 | 0.015 | 0.015 |
| Transportation_Communication | 0.016 | 0.020 | 0.027 | 0.023 | 0.017 |
| Education_Entertainment | 0.018 | 0.022 | 0.024 | 0.017 | 0.022 |
| Medical | 0.023 | 0.062 | 0.041 | 0.057 | 0.060 |
| Food | 0.037 | 0.031 | 0.031 | 0.040 | 0.032 |
| Household | 0.052 | 0.110 | 0.107 | 0.120 | 0.102 |
| Others | 0.008 | 0.008 | 0.010 | 0.005 | 0.009 |
Table 5: Detailed results on the national economic survey simulation reported in NRMSE, where the Item column indicates the components of spending. The best results are **bolded**; the second-best results are underlined.

The simulation of the national economic survey covers 8 spending dimensions. The overall results in Table 3 show the average performance over these dimensions, while model performance can also vary across them. We calculate the averaged NRMSE over 31 regions for each spending item, as shown in Table 5. It is worth mentioning that all the models show high consistency. Excluding the Others item, all the models perform best on daily-necessities spending planning and worst on household spending, which reveals LLMs' preferences in economic decision-making and highlights the challenge of planning housing spending.

# 5 Discussion

In this study, we introduce SocioVerse, a generalized social simulation framework, and evaluate its performance across three distinct real-world scenarios. Our findings indicate that state-of-the-art LLMs demonstrate a notable ability to simulate human responses in complex social contexts, although some gaps still remain between the simulated responses and the observed real-world outcomes. Therefore, future research may need to incorporate a broader range of scenarios and develop more fine-grained evaluations built upon the current analytic engine, to further explore and expand the boundaries of LLMs' simulation capabilities. Such efforts could pave the way for establishing LLMs as comprehensive and reliable tools for large-scale social simulation.

We observed several key patterns across the simulated scenarios. First, incorporating demographic distributions and users' historical experiences significantly improved simulation accuracy. These findings highlight the importance of building a large, demographically rich user pool, complemented by a multi-dimensional user tagging system for more precise modeling of group-specific behaviors. Second, under consistent measurement protocols, LLMs produced broadly similar simulations of human attitudes and ideologies. However, certain models, such as GPT-4o-mini, showed notable inconsistencies, indicating that model-specific preferences or biases remain influential and warrant closer scrutiny in future work. Finally, we found that while LLMs perform well in simple daily scenarios, they underperform in complex situations requiring contextual knowledge, underscoring the need to align model behavior with real-world experiences and social contexts.

Notably, the current version has only implemented part of our framework, indicating significant potential for enhancing the accuracy and quality of social simulations. Future work can focus on refining each module for better collaboration, enabling the framework to achieve its full potential. For instance, incorporating the social environment can inject up-to-date knowledge into LLMs, enhancing the understanding of social dynamics. The scenario engine can not only provide survey-based simulation but also expand to diverse formats such as social interviews and free interactions. Additionally, further optimization of general-LLM and expert-LLM adaptation in the behavior engine will enable better accommodation of complex target user groups, such as minority groups and individuals with disabilities. The analysis engine can introduce an autonomous planning module to improve the overall credibility of simulation results.
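To make the metrics reported in §4 concrete before turning to broader implications, the sketch below shows one plausible way to compute the state-level Acc under the winner-takes-all rule, the RMSE over vote shares, and the NRMSE used for the spending comparison in Table 5. The plain-dictionary data layout and the range-based NRMSE normalization are illustrative assumptions, not the exact implementation used in this work.

```python
import math

def winner_takes_all_acc(pred_shares, true_shares):
    """Fraction of states whose predicted winner matches the real winner.

    pred_shares / true_shares: dict mapping state -> {candidate: vote share}.
    """
    correct = sum(
        max(pred_shares[s], key=pred_shares[s].get)
        == max(true_shares[s], key=true_shares[s].get)
        for s in true_shares
    )
    return correct / len(true_shares)

def rmse(pred, true):
    """Root mean squared error over paired values (e.g., per-state vote shares)."""
    errs = [(pred[k] - true[k]) ** 2 for k in true]
    return math.sqrt(sum(errs) / len(errs))

def nrmse(pred, true):
    """RMSE normalized by the range of the ground-truth values
    (one plausible normalization for the spending shares in Table 5)."""
    spread = max(true.values()) - min(true.values())
    return rmse(pred, true) / spread if spread else 0.0
```

Under this reading, the "over $90\%$ of state voting results" figure in §4.1 corresponds to `winner_takes_all_acc` exceeding 0.9.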
+ +Beyond the social simulation framework, our work underscores the potential to bridge the gap between autonomous AI systems and traditional social science, offering social scientists a seamless, cost-effective tool for conducting social experiments with minimal setup. Such tools not only help analyze and validate psychological and sociological theories or hypotheses, such as behavioral economics and social identity theory, but also assist in predicting large-scale social impacts like policy changes, social movements, or public health crises. By providing an efficient and scalable simulation environment, our framework is not just a research tool, but an experimental platform for exploring the dynamic changes and long-term trends of virtual societies, with the aim of becoming a realistic mapping for real-world societies. + +# Acknowledgement + +We would like to express our sincere gratitude to Professor Rongwei Chu and his research team for their invaluable support in this work. The project's computational resources are supported by the CFFF platform of Fudan University. + +# References + +[1] American National Election Studies. Anes 2020 time series study full release [dataset and documentation], 2021. February 10, 2022 version. +[2] J. R. Anthis, R. Liu, S. M. Richardson, A. C. Kozlowski, B. Koch, J. Evans, E. Brynjolfsson, and M. Bernstein. LIm social simulations are a promising research method. arXiv preprint arXiv:2504.02234, 2025. +[3] L. P. Argyle, E. C. Busby, N. Fulda, J. R. Gubler, C. Rytting, and D. Wingate. Out of one, many: Using language models to simulate human samples. Political Analysis, 31(3):337-351, 2023. +[4] Z. Bao, Q. Liu, Y. Guo, Z. Ye, J. Shen, S. Xie, J. Peng, X. Huang, and Z. Wei. Piers: Personalized intelligent outpatient reception based on large language model with multi-agents medical scenario simulation. arXiv preprint arXiv:2411.13902, 2024. +[5] A. Barnett and A. Sarfati. The polls and the us presidential election in 2020.... and 2024. Statistics and Public Policy, 10(1):2199809, 2023. +[6] L. M. Bartels. Uninformed votes: Information effects in presidential elections. American journal of political science, pages 194-230, 1996. +[7] I. Beltagy, M. E. Peters, and A. Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020. +[8] A. K. Chandra, D. C. Kozen, and L. J. Stockmeyer. Alternation. Journal of the Association for Computing Machinery, 28(1):114-133, 1981. +[9] Y.-S. Chuang and T. T. Rogers. Computational agent-based models in opinion dynamics: A survey on social simulations and empirical studies. arXiv preprint arXiv:2306.03446, 2023. +[10] V. Cologna, N. G. Mede, S. Berger, J. Besley, C. Brick, M. Joubert, E. W. Maibach, S. Mihelj, N. Oreskes, M. S. Schäfer, et al. Trust in scientists and their role in society across 68 countries. Nature Human Behaviour, pages 1–18, 2025. +[11] T. Connolly. Micromotives and macrobehavior., 1979. +[12] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2019. +[13] F. Dignum, V. Dignum, P. Davidsson, A. Ghorbani, M. van der Hurk, M. Jensen, C. Kammler, F. Lorig, L. G. Ludescher, A. Melchior, et al. Analysing the combined health, social and economic impacts of the coronavirus pandemic using agent-based social simulation. *Minds and Machines*, 30:177–194, 2020. +[14] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan, et al. 
The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +[15] C. Gao, X. Lan, N. Li, Y. Yuan, J. Ding, Z. Zhou, F. Xu, and Y. Li. Large language models empowered agent-based modeling and simulation: A survey and perspectives. *Humanities and Social Sciences Communications*, 11(1):1-24, 2024. +[16] C. Gao, X. Lan, Z. Lu, J. Mao, J. Piao, H. Wang, D. Jin, and Y. Li. S3: Social-network simulation system with large language model-empowered agents. arXiv preprint arXiv:2307.14984, 2023. +[17] S. Giorgi, V. E. Lynn, K. Gupta, F. Ahmed, S. Matz, L. H. Ungar, and H. A. Schwartz. Correcting sociodemographic selection biases for population prediction from social media. In Proceedings of the International AAAI Conference on Web and Social Media, volume 16, pages 228-240, 2022. +[18] B. E. GOLDSMITH, Y. HORIUCHI, and K. MATUSH. Does public diplomacy sway foreign public opinion? identifying the effect of high-level visits. American Political Science Review, 115(4):1342-1357, 2021. +[19] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[20] B. Gómez-Calderón and Y. Ceballos. Journalism and artificial intelligence: the treatment of the chatbots in the Spanish press. index.comunicación, 14(1):281–300, Jan. 2024. + +[21] J. C. Jackson, D. Rand, K. Lewis, M. I. Norton, and K. Gray. Agent-based modeling: A guide for social psychologists. Social Psychological and Personality Science, 8(4):387-395, 2017. +[22] A. Joshi, S. Kale, S. Chandel, and D. K. Pal. Likert scale: Explored and explained. British journal of applied science & technology, 7(4):396-403, 2015. +[23] M. Jusup, P. Holme, K. Kanazawa, M. Takayasu, I. Romić, Z. Wang, S. Geček, T. Lipić, B. Podobnik, L. Wang, et al. Social physics. Physics Reports, 948:1-148, 2022. +[24] S. Keeter, N. Hatley, A. Lau, and C. Kennedy. What 2020's election poll errors tell us about the accuracy of issue polling. Pew Research Center Methods, 2021. +[25] W. Kwon, Z. Li, S. Zhuang, Y. Sheng, L. Zheng, C. H. Yu, J. E. Gonzalez, H. Zhang, and I. Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023. +[26] S. Lee, T.-Q. Peng, M. H. Goldberg, S. A. Rosenthal, J. E. Kotcher, E. W. Maibach, and A. Leiserowitz. Can large language models capture public opinion about global warming? an empirical assessment of algorithmic fidelity and bias. arXiv preprint arXiv:2311.00217, 2023. +[27] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. +[28] B. Liu, Y. Xu, Y. Yang, and S. Lu. How public cognition influences public acceptance of ccus in china: Based on the abc (affect, behavior, and cognition) model of attitudes. Energy Policy, 156:112390, 2021. +[29] X. Liu, S. Yang, X. Zhang, H. Kuang, L. Sun, Y. Yang, S. Chen, X. Huang, and Z. Wei. Ai-press: A multi-agent news generating and feedback simulation system powered by large language models. arXiv preprint arXiv:2410.07561, 2024. +[30] Y. Liu, X. Chen, X. Zhang, X. Gao, J. Zhang, and R. Yan. From skepticism to acceptance: Simulating the attitude dynamics toward fake news. arXiv preprint arXiv:2403.09498, 2024. +[31] H. Lyu, S. Jiang, H. Zeng, Y. Xia, Q. Wang, S. Zhang, R. Chen, C. Leung, J. Tang, and J. Luo. 
Llm-rec: Personalized recommendation via prompting large language models. arXiv preprint arXiv:2307.15780, 2023. +[32] C. M. Macal and M. J. North. Agent-based modeling and simulation. In Proceedings of the 2009 winter simulation conference (WSC), pages 86-98. IEEE, 2009. +[33] B. Major, A. Blodorn, and G. Major Blascovich. The threat of increasing diversity: Why many white americans support trump in the 2016 presidential election. Group Processes & Intergroup Relations, 21(6):931-940, 2018. +[34] X. Mou, X. Ding, Q. He, L. Wang, J. Liang, X. Zhang, L. Sun, J. Lin, J. Zhou, X. Huang, et al. From individual to society: A survey on social simulation driven by large language model-based agents. arXiv preprint arXiv:2412.03563, 2024. +[35] X. Mou, Z. Li, H. Lyu, J. Luo, and Z. Wei. Unifying local and global knowledge: Empowering large language models as political experts with knowledge graphs. In Proceedings of the ACM Web Conference 2024, pages 2603–2614, 2024. +[36] X. Mou, J. Liang, J. Lin, X. Zhang, X. Liu, S. Yang, R. Ye, L. Chen, H. Kuang, X. Huang, and Z. Wei. Agentsense: Benchmarking social intelligence of language agents through interactive scenarios, 2024. +[37] X. Mou, Z. Wei, and X. Huang. Unveiling the truth and facilitating change: Towards agent-based large-scale social movement simulation. arXiv preprint arXiv:2402.16333, 2024. +[38] NBS China. Communiqué of the Seventh National Population Census of the People's Republic of China. Technical report, 2023. Accessed: 2025-02-14. +[39] NBS China. Explanatory Notes on Main Statistical Indicators – Population, Society, and Labor (China Statistical Yearbook 2023), 2023. Accessed: 2025-02-14. +[40] NBS China. China Statistical Yearbook 2024, 2024. Accessed: 2025-02-14. +[41] OpenAI. GPT-4o System Card. Technical report, 2024. Accessed: 2025-02-14. + +[42] J. S. Park, J. O'Brien, C. J. Cai, M. R. Morris, P. Liang, and M. S. Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023. +[43] J. S. Park, C. Q. Zou, A. Shaw, B. M. Hill, C. Cai, M. R. Morris, R. Willer, P. Liang, and M. S. Bernstein. Generative agent simulations of 1,000 people. arXiv preprint arXiv:2411.10109, 2024. +[44] L. Peisakhin, N. Stoop, and P. Van der Windt. Who hosts? the correlates of hosting the internally displaced. American Political Science Review, pages 1-16, 2024. +[45] F. Ribeiro, L. Henrique, F. Benevenuto, A. Chakraborty, J. Kulshrestha, M. Babaei, and K. Gummadi. Media bias monitor: Quantifying biases of social media news outlets at large-scale. In Proceedings of the International AAAI Conference on Web and Social Media, volume 12, 2018. +[46] S. J. Rosenstone. Forecasting presidential elections. 1981. +[47] T. C. Schelling. Models of segregation. The American economic review, 59(2):488-493, 1969. +[48] T. C. Schelling. Dynamic models of segregation. Journal of mathematical sociology, 1(2):143-186, 1971. +[49] Y. Shao, L. Li, J. Dai, and X. Qiu. Character-llm: A trainable agent for role-playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 13153–13187, 2023. +[50] E. R. Smith and F. R. Conrey. Agent-based modeling: A new approach for theory building in social psychology. *Personality and social psychology review*, 11(1):87-104, 2007. +[51] L. Sun, S. Wang, X. Huang, and Z. Wei. Identity-driven hierarchical role-playing agents. arXiv preprint arXiv:2407.19412, 2024. +[52] S. Tang. 
Idea, action, and outcome. Innovation in the Social Sciences, 2(2):123-170, 2024. +[53] R. A. Teixeira. Red, blue, and purple America: the future of election demographics. Rowman & Littlefield, 2009. +[54] T. Trimborn, P. Otte, S. Cramer, M. Beikirch, E. Pabich, and M. Frank. Subcemm: A simulator for agent-based computational economic market models. Computational economics, 55(2):707-744, 2020. +[55] A. van Dalen. Revisiting the algorithms behind the headlines. how journalists respond to professional competition of generative ai. Journalism Practice, pages 1-18, 2024. +[56] L. Wang, J. Zhang, H. Yang, Z. Chen, J. Tang, Z. Zhang, X. Chen, Y. Lin, R. Song, W. X. Zhao, et al. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552, 2023. +[57] K. Wu, X. Mou, L. Xue, Z. Ying, W. Wang, Q. Zhang, X.-J. Huang, and Z. Wei. Pasum: A pre-training architecture for social media user modeling based on text graph. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 12644-12656, 2024. +[58] B. Xiao, Z. Yin, and Z. Shan. Simulating public administration crisis: A novel generative agent-based simulation system to lower technology barriers in social science research. arXiv preprint arXiv:2311.06957, 2023. +[59] A. Yang, B. Yang, B. Zhang, B. Hui, B. Zheng, B. Yu, C. Li, D. Liu, F. Huang, H. Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +[60] Z. Yang, Z. Zhang, Z. Zheng, Y. Jiang, Z. Gan, Z. Wang, Z. Ling, J. Chen, M. Ma, B. Dong, et al. Oasis: Open agents social interaction simulations on one million agents. arXiv preprint arXiv:2411.11581, 2024. +[61] R. Ye, Y. Zhang, Y. Zhang, H. Kuang, Z. Wei, and P. Sun. Multi-agent kto: Reinforcing strategic interactions of large language model in language game. arXiv preprint arXiv:2501.14225, 2025. +[62] S. Yue, S. Wang, W. Chen, X. Huang, and Z. Wei. Synergistic multi-agent framework with trajectory learning for knowledge-intensive tasks. arXiv preprint arXiv:2407.09893, 2024. +[63] X. Zhang, H. Kuang, X. Mou, H. Lyu, K. Wu, S. Chen, J. Luo, X. Huang, and Z. Wei. SoMeLVLM: A large vision language model for social media processing. In L.-W. Ku, A. Martins, and V. Srikumar, editors, Findings of the Association for Computational Linguistics ACL 2024, pages 2366-2389, Bangkok, Thailand and virtual meeting, Aug. 2024. Association for Computational Linguistics. +[64] X. Zhang, J. Lin, L. Sun, W. Qi, Y. Yang, Y. Chen, H. Lyu, X. Mou, S. Chen, J. Luo, et al. Electionsim: Massive population election simulation powered by large language model driven agents. arXiv preprint arXiv:2410.20746, 2024. + +# A Data Cleaning Details + +# A.1 Content Data Extraction + +We extract only post-related content on all the social media platforms to avoid violating privacy policies. Specifically, the data list on each platform is shown in Table 6. + +
| Platform | Data list |
| --- | --- |
| X | user ID, tweet, #likes, #comments, #retweets |
| Rednote | user ID, notes, #likes, #comments |
Table 6: Data list for each social media platform during the data collection.

# A.2 Abnormal Data Filtering

We filter abnormal data to guarantee data quality through a text-similarity calculation. Specifically, all the textual content from the same user is scored by its word repetition ratio, and the threshold is set to 0.3. If the ratio surpasses the threshold, the user is considered likely to be a bot or an advertising account and is filtered out (a minimal sketch of this rule is given after Table 7).

# B Demographics Annotation System

# B.1 LLM Annotation

To save costs, we first sample a subset of the user pool and employ multiple powerful LLMs for annotation. Due to the long time span of this work, users from different data sources in the user pool were annotated with the most capable LLMs available at the time. For users derived from X, GPT-4o$^5$, Claude3.5-Sonnet$^6$, and Gemini-1.5$^7$ are employed. For users derived from Rednote, GPT-4o, Claude3.5-Sonnet, and Qwen2.5-72b are employed.

# B.2 Human Evaluation

We employ 7 professional human annotators to verify the results annotated by LLMs. Specifically, each annotator is required to re-annotate the demographic factors without seeing the LLM labels. All the data are verified by at least 2 human annotators. The overall consistency between humans and LLMs is shown in Table 7.
| Models | Human (X) | Human (Rednote) |
| --- | --- | --- |
| GPT-4o | 0.905 | 0.723 |
| Claude3.5 | 0.901 | 0.659 |
| Gemini-1.5 | 0.713 | \ |
| Qwen2.5 | \ | 0.846 |
| Majority votes | 0.956 | 0.849 |
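As a concrete illustration of the abnormal-data filter described in §A.2, the sketch below computes a word repetition ratio over a user's concatenated posts and flags the user when the ratio exceeds the 0.3 threshold. The exact ratio definition is not specified in the text, so the token-frequency version used here is an assumption.

```python
from collections import Counter

REPETITION_THRESHOLD = 0.3  # threshold stated in Appendix A.2

def word_repetition_ratio(posts):
    """Share of tokens that are repeats of an already-seen token,
    computed over all textual content from one user (assumed definition)."""
    tokens = " ".join(posts).lower().split()
    if not tokens:
        return 0.0
    counts = Counter(tokens)
    repeated = sum(c - 1 for c in counts.values())  # occurrences beyond the first
    return repeated / len(tokens)

def is_suspected_bot_or_ad(posts, threshold=REPETITION_THRESHOLD):
    """A user whose ratio surpasses the threshold is filtered as a likely bot or ad account."""
    return word_repetition_ratio(posts) > threshold
```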
+ +# B.3 Classifier Training + +We take the majority-voted labels from different LLMs to construct the training dataset. Considering the difference in mainstream language used on different platforms, we employ LongFormer [7] for X data and employ Bert-base-chinese [12] for Rednote. The implementation details are shown in Table 8. + +Table 7: Human annotators' verification results. We report the consistency between humans and different LLMs. + +
| Params | LongFormer | Bert-base-chinese |
| --- | --- | --- |
| train_size | 10,000 | 10,000 |
| # classifiers | 5 | 4 |
| max_tokens | 4096 | 512 |
| learning_rate | 5e-5 | 5e-5 |
| batch_size | 16 | 32 |
| optimizer | AdamW | AdamW |
| epochs | 3 | 10 |
| device | 8*4090 | 2*4090 |
Table 8: Implementation details for demographic classifiers.

We report the performance of the demographic classifiers on each demographic factor in Table 9.

# B.4 Overall Distribution of the User Pool

We employ the demographic classifiers to annotate all of the users in the user pool, and the overall distributions are shown in Figure 5.
| Demos | LongFormer Acc | LongFormer F1 | Bert-base-chinese Acc | Bert-base-chinese F1 |
| --- | --- | --- | --- | --- |
| Gender | 0.875 | 0.904 | 0.926 | 0.958 |
| Age | 0.902 | 0.873 | 0.925 | 0.920 |
| Party | 0.849 | 0.846 | \ | \ |
| Ideology | 0.810 | 0.807 | \ | \ |
| Race | 0.779 | 0.768 | \ | \ |
| Consumption | \ | \ | 0.749 | 0.748 |
| Education | \ | \ | 0.954 | 0.975 |
Table 9: Performance of the demographic classifiers on the test set.

For other demographics in specific simulations that are not considered in the prior distribution, only users from the sampled user pool are annotated by the majority votes of LLMs.

![](images/0cb012c6c55c49fd65fa15745af750ea86382e3c5992b62bd56df63efb6706fc.jpg)
Figure 5: Demographic distribution on the X and Rednote user pools.

![](images/cb26ecd99c58640d8eb4effc287a4e8bee0c4214555edb59f3d149882199fb82.jpg)

# C Demographic Distribution Sampling Details

# C.1 Iterative Proportional Fitting

In our study, we follow the classical IPF method to construct the joint distribution of all the attributes in our simulation. Specifically, we start with a two-way table with individual components denoted as $x_{ij}$ and targeted estimations $\hat{x}_{ij}$. The targeted estimation $\hat{x}_{ij}$ satisfies $\sum_j \hat{x}_{ij} = v_i$ and $\sum_i \hat{x}_{ij} = w_j$. The iterations are specified as follows:

Let $\hat{x}_{ij}^{(0)} = x_{ij}$. For $\alpha \geq 1$:

$$
\hat{x}_{ij}^{(2\alpha - 1)} = \frac{\hat{x}_{ij}^{(2\alpha - 2)} v_{i}}{\sum_{k = 1}^{J} \hat{x}_{ik}^{(2\alpha - 2)}} \tag{1}
$$

$$
\hat{x}_{ij}^{(2\alpha)} = \frac{\hat{x}_{ij}^{(2\alpha - 1)} w_{j}}{\sum_{k = 1}^{I} \hat{x}_{kj}^{(2\alpha - 1)}} \tag{2}
$$

The iterations end when the estimated marginals are sufficiently close to the real marginals or when they stabilize without further convergence.

For the presidential election simulation, we implement the IPF algorithm for each state using five attributes: gender, race, age group, ideology, and partisanship. In most cases, the algorithm does not converge, but the gaps between the estimated and actual marginals are less than $5\%$, with 888 out of 918 marginals falling within this range. For the outliers, since IPF adjusts proportionally to the marginals, the overall ratio of marginals remains consistent. We then use the estimated joint distribution and marginals for our massive simulation.

# C.2 Identical Distribution Sampling

Identical distribution sampling, also known as direct sampling, is applied when the joint distribution of multiple demographics is available. Given features $X$ and $Y$, the joint distribution can be formulated as $p(X,Y)$. Then, identical distribution sampling can be formulated as follows:

$$
(X_{i}, Y_{i}) \sim p(X, Y), \quad i = 1, 2, \dots, n \tag{3}
$$

For the breaking news feedback simulation, as the ground-truth set comes directly from Rednote, we can obtain all the users' demographics and calculate the joint distribution. Meanwhile, the scale of the user pool satisfies the requirements of direct sampling.

# C.3 Prior Distribution of National Economic Survey

For the national economic survey distribution, only the average income is available from the official data. As a result, we generate the prior income distribution at the regional level. The income distribution across different regions exhibits significant heterogeneity, often characterized by a right-skewed pattern. To model this distribution, we adopt a mixture distribution approach, combining a log-normal distribution for the majority of the population with a Pareto distribution for the high-income segment. This hybrid model captures both the bulk of wage earners and the long-tail effect observed in high-income groups.

Formally, let $X$ denote an individual's wage.
We assume that for the lower- and middle-income groups $(X < x_{\min})$, incomes follow a log-normal distribution:

$$
X \sim \text{LogNormal}(\mu, \sigma^{2}) \tag{4}
$$

where

$$
\mu = \ln\left(\frac{\mu_{\text{actual}}^{2}}{\sqrt{\sigma_{\text{actual}}^{2} + \mu_{\text{actual}}^{2}}}\right), \quad \sigma = \sqrt{\ln\left(1 + \frac{\sigma_{\text{actual}}^{2}}{\mu_{\text{actual}}^{2}}\right)} \tag{5}
$$

For the high-income group $(X \geq x_{\min})$, wages follow a Pareto distribution:

$$
P(X \geq x) = C x^{-\alpha}, \quad x \geq x_{\min} \tag{6}
$$

where $\alpha$ is the Pareto shape parameter determining the income concentration at the top. The proportion of individuals assigned to each distribution is governed by an empirical threshold ratio, typically set such that $90\%$ of the population follows the log-normal distribution while $10\%$ follows the Pareto distribution. This mixture approach provides a flexible yet robust framework for simulating realistic income distributions across diverse economic conditions. We set all the parameters empirically according to previous research and generate the income distribution for 31 regions in China (Hong Kong, Macao, and Taiwan are excluded).

# D Questionnaire Design Details

We provide the questionnaires here for all three simulations.

# D.1 Questionnaire for Presidential Election Prediction
Q01Voting Behavior
QuestionORDER OF MAJOR PARTY CANDIDATE NAMES
Value Labels1. Democrat first / Republican second +2. Republican first / Democrat second
Q02Social Security
QuestionNext I am going to read you a list of federal programs. For each one, I would like you to tell me whether you would like to see spending increased, decreased, or kept the same. What about Social Security? Should federal spending on Social Security be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q03Education
QuestionWhat about public schools? Should federal spending on public schools be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q04Immigration
QuestionWhat about tightening border security to prevent illegal immigration? Should federal spending on tightening border security to prevent illegal immigration be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q05Criminal Justice
QuestionWhat about dealing with crime? Should federal spending on dealing with crime be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q06Social Welfare
QuestionWhat about welfare programs? Should federal spending on welfare programs be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q07Infrastructure
QuestionWhat about building and repairing highways? Should federal spending on building and repairing highways be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q08Aid to Poor
QuestionWhat about aid to the poor? Should federal spending on aid to the poor be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q09Environment
QuestionWhat about protecting the environment? Should federal spending on protecting the environment be increased, decreased, or kept the same?
Value Labels-2. DK/RF +1. Increased +2. Decreased +3. Kept the same
Q10Government
QuestionHow much do you feel that having elections makes the government pay attention to what the people think?
Value Labels-2. DK/RF +1. A good deal +2. Some +3. Not much
Q11Economy
QuestionWhich party do you think would do a better job of handling the nation's economy?
Value Labels-2. DK/RF +1. Democrats would do a better job +2. Not much difference between them +3. Republicans would do a better job
Q12Health Care
QuestionWhich party do you think would do a better job of handling health care?
Value Labels-2. DK/RF +1. Democrats would do a better job +2. Not much difference between them +3. Republicans would do a better job
Q13Immigration
QuestionWhich party do you think would do a better job of handling immigration?
Value Labels-2. DK/RF +1. Democrats would do a better job +2. Not much difference between them +3. Republicans would do a better job
Q14Taxes
QuestionWhich party do you think would do a better job of handling taxes?
Value Labels-2. DK/RF +1. Democrats would do a better job +2. Not much difference between them +3. Republicans would do a better job
Q15Environment
QuestionWhich party do you think would do a better job of handling the environment?
Value Labels-2. DK/RF +1. Democrats would do a better job +2. Not much difference between them +3. Republicans would do a better job
Q16Education
QuestionSome people think the government should provide fewer services even in areas such as health and education in order to reduce spending. +Other people feel it is important for the government to provide many more services even if it means an increase in spending. +And, of course, some people have a neutral position. +Which of the following best describes your view?
Value Labels-2. DK/RF +1. Government should provide fewer services +2. Neutral +3. Government should provide more services
Q17Defense
QuestionSome people believe that we should spend less money for defense. +Others feel that defense spending should be increased. +And, of course, some people have a neutral position. +Which of the following best describes your view?
Value Labels-2. DK/RF +1. Decrease defense spending +2. Neutral +3. Increase defense spending
Q18Health Care
QuestionThere is much concern about the rapid rise in medical and hospital costs. +Some people feel there should be a government insurance plan which would cover all medical and hospital expenses for everyone. +Others feel that all medical expenses should be paid by individuals through private insurance plans like Blue Cross or other company paid plans. +And, of course, some people have a neutral position. +Which of the following best describes your view?
Value Labels-2. DK/RF +1. Government insurance plan +2. Neutral +3. Private insurance plan
Q19Social Welfare
QuestionSome people feel the government in Washington should see to it that every person has a job and a good standard of living. +Others think the government should just let each person get ahead on their own. +And, of course, some people have a neutral position. +Which of the following best describes your view?
Value Labels-2. DK/RF +1. Government should see to jobs and standard of living +2. Neutral +3. Government should let each person get ahead on own
Q20Aid to Blacks
QuestionSome people feel that the government in Washington should make every effort to improve the social and economic position of blacks. +Others feel that the government should not make any special effort to help blacks because they should help themselves. +And, of course, some people have a neutral position. +Which of the following best describes your view?
Value Labels-2. DK/RF +1. Government should help blacks +2. Neutral +3. Blacks should help themselves
Q21Environment
QuestionSome people think we need much tougher government regulations on business in order to protect the environment. +Others think that current regulations to protect the environment are already too much of a burden on business. +And, of course, some people have a neutral position. +Which of the following best describes your view?
Value Labels-2. DK/RF +1. Tougher regulations on business needed to protect environment +2. Neutral +3. Regulations to protect environment already too much a burden on business
Q22Abortion
QuestionWould you be pleased, upset, or neither pleased nor upset if the Supreme Court reduced abortion rights?
Value Labels-2. DK/RF +1. Pleased +2. Upset +3. Neither pleased nor upset
Q23Criminal Justice
QuestionDo you favor or oppose the death penalty for persons convicted of murder?
Value Labels-2. DK/RF +1. Favor +2. Oppose
Q24US Position in World
QuestionDo you agree or disagree with this statement: ‘This country would be better off if we just stayed home and did not concern ourselves with problems in other parts of the world.’
Value Labels-2. DK/RF +1. Agree +2. Disagree
Q25US Position in World
QuestionHow willing should the United States be to use military force to solve international problems?
Value Labels-2. DK/RF +1. Willing +2. Moderately willing +3. Not willing
Q26Inequality
QuestionDo you think the difference in incomes between rich people and poor people in the United States today is larger, smaller, or about the same as it was 20 years ago?
Value Labels-2. DK/RF +1. Larger +2. Smaller +3. About the same
Q27Environment
QuestionDo you think the federal government should be doing more about rising temperatures, should be doing less, or is it currently doing the right amount?
Value Labels-2. DK/RF +1. Should be doing more +2. Should be doing less +3. Is currently doing the right amount
Q28Parental Leave
QuestionDo you favor, oppose, or neither favor nor oppose requiring employers to offer paid leave to parents of new children?
Value Labels-2. DK/RF +1. Favor +2. Oppose +3. Neither favor nor oppose
Q29LGBTQ+ Rights
QuestionDo you think business owners who provide wedding-related services should be allowed to refuse services to same-sex couples if same-sex marriage violates their religious beliefs, or do you think business owners should be required to provide services regardless of a couple's sexual orientation?
Value Labels-2. DK/RF +1. Should be allowed to refuse +2. Should be required to provide services
Q30LGBTQ+ Rights
QuestionShould transgender people - that is, people who identify themselves as the sex or gender different from the one they were born as - have to use the bathrooms of the gender they were born as, or should they be allowed to use the bathrooms of their identified gender?
Value Labels-2. DK/RF +1. Have to use the bathrooms of the gender they were born as +2. Be allowed to use the bathrooms of their identified gender
Q31LGBTQ+ Rights
QuestionDo you favor or oppose laws to protect gays and lesbians against job discrimination?
Value Labels-2. DK/RF +1. Favor +2. Oppose
Q32LGBTQ+ Rights
QuestionDo you think gay or lesbian couples should be legally permitted to adopt children?
Value Labels-2. DK/RF +1. Yes +2. No
Q33LGBTQ+ Rights
QuestionWhich comes closest to your view? You can just tell me the number of your choice.
Value Labels-2. DK/RF 1. Gay and lesbian couples should be allowed to legally marry +2. Gay and lesbian couples should be allowed to form civil unions but not legally marry +3. There should be no legal recognition of gay or lesbian couples' relationship
Q34Immigration
QuestionSome people have proposed that the U.S. Constitution should be changed so that the children of unauthorized immigrants do not automatically get citizenship if they are born in this country. +Do you favor, oppose, or neither favor nor oppose this proposal?
Value Labels-2. DK/RF +1. Favor +2. Oppose +3. Neither favor nor oppose
Q35Immigration
QuestionWhat should happen to immigrants who were brought to the U.S. illegally as children and have lived here for at least 10 years and graduated high school here? Should they be sent back where they came from, or should they be allowed to live and work in the United States?
Value Labels-2. DK/RF +1. Should be sent back where they came from +2. Should be allowed to live and work in the US
Q36Immigration
QuestionDo you favor, oppose, or neither favor nor oppose building a wall on the U.S. border with Mexico?
Value Labels-2. DK/RF +1. Favor +2. Oppose +3. Neither favor nor oppose
Q37Unrest
QuestionDuring the past few months, would you say that most of the actions taken by protestors to get the things they want have been violent, or have most of these actions by protestors been peaceful, or have these actions been equally violent and peaceful?
Value Labels-2. DK/RF +1. Mostly violent +2. Mostly peaceful +3. Equally violent and peaceful
Q38Government
QuestionDo you think it is better when one party controls both the presidency and Congress, better when control is split between the Democrats and Republicans, or doesn’t it matter?
Value Labels-2. DK/RF +1. Better when one party controls both +2. Better when control is split +3. It doesn’t matter
Q39Government
QuestionWould you say the government is pretty much run by a few big interests looking out for themselves or that it is run for the benefit of all the people?
Value Labels-2. DK/RF +1. Run by a few big interests +2. For the benefit of all the people
Q40Government
QuestionDo you think that people in government waste a lot of the money we pay in taxes, waste some of it, or don’t waste very much of it?
Value Labels-2. DK/RF +1. Waste a lot +2. Waste some +3. Don’t waste very much
Q41Election Integrity
QuestionDo you favor, oppose, or neither favor nor oppose allowing convicted felons to vote once they complete their sentence?
Value Labels-2. DK/RF +1. Favor +2. Oppose +3. Neither favor nor oppose
Q42Democratic Norms
QuestionHow important is it that news organizations are free to criticize political leaders?
Value Labels-2. DK/RF +1. Not important +2. Moderately important +3. Important
Q43Democratic Norms
QuestionHow important is it that the executive, legislative, and judicial branches of government keep one another from having too much power?
Value Labels-2. DK/RF +1. Not important +2. Moderately important +3. Important
Q44Democratic Norms
QuestionHow important is it that elected officials face serious consequences if they engage in misconduct?
Value Labels-2. DK/RF +1. Not important +2. Moderately important +3. Important
Q45Democratic Norms
QuestionHow important is it that people agree on basic facts even if they disagree politically?
Value Labels-2. DK/RF +1. Not important +2. Moderately important +3. Important
Q46Democratic Norms
QuestionWould it be helpful, harmful, or neither helpful nor harmful if U.S. presidents could work on the country’s problems without paying attention to what Congress and the courts say?
Value Labels-2. DK/RF +1. Helpful +2. Harmful +3. Neither helpful nor harmful
Q47Democratic Norms
QuestionDo you favor, oppose, or neither favor nor oppose elected officials restricting journalists’ access to information about government decision-making?
Value Labels-2. DK/RF +1. Favor +2. Oppose +3. Neither favor nor oppose
Q48Gender Resentment
Question‘Many women interpret innocent remarks or acts as being sexist.’ +Do you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error +1. Agree +2. Neither agree nor disagree +3. Disagree
Q49Gender Resentment
Question‘Women seek to gain power by getting control over men.’ +Do you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error +1. Agree +2. Neither agree nor disagree +3. Disagree
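The per-state demographic targets behind this election questionnaire are constructed with the IPF procedure of §C.1 (Equations 1–2). Below is a minimal NumPy sketch of that row/column scaling for a two-way table; the convergence tolerance, iteration cap, and toy inputs are illustrative choices rather than the settings used in this work.

```python
import numpy as np

def ipf(seed, row_targets, col_targets, max_iter=100, tol=1e-6):
    """Iterative proportional fitting on a two-way table (Eqs. 1-2 in Appendix C.1).

    seed: initial I x J table x_ij; row_targets: v_i; col_targets: w_j.
    Returns a fitted table whose marginals approximate v and w.
    """
    x = np.asarray(seed, dtype=float).copy()
    v = np.asarray(row_targets, dtype=float)
    w = np.asarray(col_targets, dtype=float)
    for _ in range(max_iter):
        x *= (v / x.sum(axis=1))[:, None]   # row scaling, Eq. (1)
        x *= (w / x.sum(axis=0))[None, :]   # column scaling, Eq. (2)
        if (np.abs(x.sum(axis=1) - v).max() < tol
                and np.abs(x.sum(axis=0) - w).max() < tol):
            break
    return x

# Toy example with two attributes (e.g., gender x age group), marginals summing to 100:
fitted = ipf(np.ones((2, 3)), row_targets=[60, 40], col_targets=[30, 30, 40])
```

Each scaling step matches one of the two updates in §C.1; the per-state fit over the five attributes listed there would use the multi-way generalization of this two-way sketch.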
+ +# D.2 Questionnaire for Breaking News Feedback + +
Q01Public Cognition (PC)
QuestionI have heard of ChatGPT.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q02Public Cognition (PC)
QuestionMany people around me use ChatGPT.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q03Public Cognition (PC)
QuestionI have a deep understanding of ChatGPT's functions and applications.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q04Perceived Risks (PR)
QuestionChatGPT may lead to the widespread dissemination of false information.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q05Perceived Risks (PR)
QuestionChatGPT may reduce human thinking ability and creativity.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q06Perceived Risks (PR)
QuestionThe development of ChatGPT may replace certain jobs, and I am deeply concerned about this.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q07Perceived Benefits (PB)
QuestionChatGPT will definitely improve my work and study efficiency.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q08Perceived Benefits (PB)
QuestionChatGPT helps broaden my knowledge and provides me with new perspectives and ideas.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q09Perceived Benefits (PB)
QuestionChatGPT promotes technological innovation and development in related fields.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q10Trust (TR)
QuestionI fully trust the team developing ChatGPT to manage and guide its development responsibly.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q11Trust (TR)
QuestionI have strong confidence in the accuracy and reliability of the information generated by ChatGPT.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q12Trust (TR)
QuestionI believe that the future application of ChatGPT will be effectively regulated.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q13Fairness (FA)
QuestionThe opportunities to use ChatGPT are distributed fairly among different groups of people.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q14Fairness (FA)
QuestionI find the distribution of benefits brought by ChatGPT to be fair.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q15Fairness (FA)
QuestionI believe that the decision-making process for the development and promotion of ChatGPT is fully transparent and adequately reflects public interests.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q16Public Acceptance (PA)
QuestionOverall, I strongly welcome the emergence of ChatGPT.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q17Public Acceptance (PA)
QuestionI am definitely willing to use ChatGPT in my work or studies.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
Q18Public Acceptance (PA)
QuestionI strongly support increased investment in the research and development of AI technologies like ChatGPT.
Value Labels1. Disagree +2. Partially disagree +3. Neutral +4. Partially agree +5. Agree
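Responses to the 18 items above are grouped into the six dimensions reported in Figure 4 (PC, PR, PB, TR, FA, PA; three items each, following the order of this questionnaire). The sketch below aggregates simulated 1–5 answers into per-dimension scores by averaging the three items of each dimension, which is one plausible reading of the §4.3 evaluation rather than the exact procedure used in this work.

```python
from statistics import mean

# Item-to-dimension mapping follows the order of the D.2 questionnaire (Q01-Q18).
DIMENSIONS = {
    "PC": ["Q01", "Q02", "Q03"],  # Public Cognition
    "PR": ["Q04", "Q05", "Q06"],  # Perceived Risks
    "PB": ["Q07", "Q08", "Q09"],  # Perceived Benefits
    "TR": ["Q10", "Q11", "Q12"],  # Trust
    "FA": ["Q13", "Q14", "Q15"],  # Fairness
    "PA": ["Q16", "Q17", "Q18"],  # Public Acceptance
}

def dimension_scores(answers):
    """answers: dict like {"Q01": 4, ..., "Q18": 2} with 1-5 Likert values.
    Returns the mean score per dimension (assumed aggregation)."""
    return {dim: mean(answers[q] for q in items) for dim, items in DIMENSIONS.items()}

def group_average(all_answers):
    """Average each dimension over a set of simulated agents."""
    per_agent = [dimension_scores(a) for a in all_answers]
    return {dim: mean(scores[dim] for scores in per_agent) for dim in DIMENSIONS}
```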
+ +# D.3 Questionnaire for National Economic Survey + +
Q01Food
QuestionWhat is your average monthly expenditure on food (including dining out)? (Unit: CNY)
Value LabelsA. Below 500 CNY +B. 501-650 CNY +C. 651-800 CNY +D. 801-1000 CNY +E. Above 1000 CNY
Q02Food
QuestionDo you think your current spending on food, tobacco, and alcohol is too high relative to your income?
Value LabelsA. Yes +B. No +C. Acceptable
Q03Clothing
QuestionWhat is your average monthly expenditure on clothing (including apparel, shoes, and accessories)? (Unit: CNY)
Value LabelsA. Below 50 CNY +B. 51-100 CNY +C. 101-150 CNY +D. 151-200 CNY +E. Above 200 CNY
Q04Clothing
QuestionHow much economic pressure do you feel from clothing expenses?
Value LabelsA. Very low, almost no pressure +B. Moderate, some pressure but manageable +C. High, requires careful spending +D. Very high, affects spending in other areas
Q05Household
QuestionWhat is your average monthly housing expenditure? (Including rent, mortgage, property fees, maintenance, etc.) (Unit: CNY)
Value LabelsA. Below 200 CNY +B. 201-500 CNY +C. 501-800 CNY +D. 801-1200 CNY +E. Above 1200 CNY
Q06Household
QuestionWhat percentage of your monthly income is spent on housing? (Including rent, mortgage, property fees, maintenance, etc.)
Value LabelsA. Below 10% +B. 10%-20% +C. 21%-30% +D. 31%-40% +E. Above 40%
Q07Daily Service
QuestionWhat is your average monthly expenditure on daily necessities (personal care, household items, cleaning supplies, etc.) and services (housekeeping, repairs, beauty, pet services, etc.)? (Unit: CNY)
Value LabelsA. Below 80 CNY +B. 81-120 CNY +C. 121-160 CNY +D. 161-200 CNY +E. Above 200 CNY
Q08Transportation & Communication
QuestionWhat is your average monthly expenditure on transportation (public transport, taxis, fuel, parking, etc.) and communication (mobile and internet fees)? (Unit: CNY)
Value LabelsA. Below 200 CNY +B. 201-300 CNY +C. 301-400 CNY +D. 401-500 CNY +E. Above 500 CNY
Q09Education & Entertainment
QuestionWhat is your average monthly expenditure on education (tuition, training, books, etc.) and cultural entertainment (movies, performances, games, fitness, cultural activities, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY +B. 101-200 CNY +C. 201-300 CNY +D. 301-400 CNY +E. Above 400 CNY
Q10Education & Entertainment
QuestionCan you easily afford your current education, cultural, and entertainment expenses?
Value LabelsA. Yes, spending does not affect other areas +B. Barely, needs some control +C. Not really, affects other expenditures +D. No, it creates significant financial pressure
Q11Medical
QuestionWhat is your average monthly expenditure on healthcare (medications, medical services, health management, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY +B. 101-200 CNY +C. 201-300 CNY +D. 301-400 CNY +E. Above 400 CNY
Q12Medical
QuestionHave you purchased private medical or health insurance for yourself or your family?
Value LabelsA. Yes +B. Not yet, but planning to +C. No, and no plans to
Q13Others
QuestionBesides food, clothing, housing, daily necessities and services, transportation, education, culture, and healthcare, what is your average monthly expenditure on other areas (e.g., hobbies, charitable donations, investment, etc.)? (Unit: CNY)
Value LabelsA. Below 30 CNY +B. 31-60 CNY +C. 61-90 CNY +D. 91-120 CNY +E. Above 120 CNY
Q14Overall
QuestionHow would you evaluate the impact of your current consumption level on your household (or personal) financial situation?
Value LabelsA. Comfortable, can moderately increase spending +B. Average, can maintain current spending +C. Tight, need to control or reduce spending +D. Very tight, affects quality of life
Q15Overall
QuestionDo you feel that your consumption pressure is too high relative to your income level?
Value LabelsA. Yes +B. No +C. Not sure
Q16Overall
QuestionIf your income increases, which consumption areas would you most like to expand or improve? (Multiple choices allowed)
Value LabelsA. Food and alcohol +B. Clothing +C. Housing +D. Daily necessities and services +E. Transportation and communication +F. Education, culture, and entertainment +G. Healthcare +H. Other goods and services
Q17Overall
QuestionWhat is your consumption expectation for the next six months to a year?
Value LabelsA. Will continue to increase +B. Will remain roughly the same +C. Will moderately decrease +D. Uncertain
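The prior income distribution that seeds this economic-survey simulation is the log-normal + Pareto mixture of §C.3, with roughly $90\%$ of individuals drawn from the log-normal body and $10\%$ from the Pareto tail. The sketch below follows Equations (4)–(6); the concrete parameter values in the example call are placeholders, not the calibrated regional settings used in this work.

```python
import numpy as np

def sample_incomes(n, mean_income, std_income, x_min, pareto_alpha,
                   tail_share=0.1, rng=None):
    """Draw n incomes from the mixture model in Appendix C.3.

    Log-normal parameters are derived from the regional mean/std via Eq. (5);
    the top `tail_share` of individuals are drawn from a Pareto tail (Eq. 6).
    """
    rng = np.random.default_rng() if rng is None else rng
    sigma = np.sqrt(np.log(1 + std_income**2 / mean_income**2))             # Eq. (5)
    mu = np.log(mean_income**2 / np.sqrt(std_income**2 + mean_income**2))   # Eq. (5)

    n_tail = int(n * tail_share)
    body = rng.lognormal(mean=mu, sigma=sigma, size=n - n_tail)             # Eq. (4)
    tail = x_min * (1 + rng.pareto(pareto_alpha, size=n_tail))              # Eq. (6)
    return np.concatenate([body, tail])

# Illustrative call for one region (all numbers are placeholders):
incomes = sample_incomes(n=10_000, mean_income=4_000, std_income=2_500,
                         x_min=12_000, pareto_alpha=2.5)
```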
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10157/images/02108be3574c08f31dfa322bb6fe31f2cac9f0c26240bc43f385a3edcc216619.jpg b/data/2025/2504_10xxx/2504.10157/images/02108be3574c08f31dfa322bb6fe31f2cac9f0c26240bc43f385a3edcc216619.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da453f3646cb53e00b5c36e4920ed48f1a6c0d2f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/02108be3574c08f31dfa322bb6fe31f2cac9f0c26240bc43f385a3edcc216619.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56a5534777cef90574cd4eb36048aa295f73af9d9c6d03ea42f490dc3e17d9f5 +size 290134 diff --git a/data/2025/2504_10xxx/2504.10157/images/0cb012c6c55c49fd65fa15745af750ea86382e3c5992b62bd56df63efb6706fc.jpg b/data/2025/2504_10xxx/2504.10157/images/0cb012c6c55c49fd65fa15745af750ea86382e3c5992b62bd56df63efb6706fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6cfa901c17f4a5558ee8e3ac464f69a348f1e6f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/0cb012c6c55c49fd65fa15745af750ea86382e3c5992b62bd56df63efb6706fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc0cc3aa2e52fa9634c81220eb2a71717b5976533cc530b46dd78354f78cb141 +size 23707 diff --git a/data/2025/2504_10xxx/2504.10157/images/18070cfafc0e2c3f8693e46f9ae0e339394f7c42e0f7d13109ea8126d6c4d3ff.jpg b/data/2025/2504_10xxx/2504.10157/images/18070cfafc0e2c3f8693e46f9ae0e339394f7c42e0f7d13109ea8126d6c4d3ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff7b70711f1f4ebfc60dfbbd15fc319c6e1c11a3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/18070cfafc0e2c3f8693e46f9ae0e339394f7c42e0f7d13109ea8126d6c4d3ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab478f76529ed648d1e0565550e27e9e97a35fa1eb4386258777be392ab92e2a +size 186968 diff --git a/data/2025/2504_10xxx/2504.10157/images/1c254baddeee82d257854c6de8cf0642cfab2a1bf3f197f0a8fb52bcd38fa176.jpg b/data/2025/2504_10xxx/2504.10157/images/1c254baddeee82d257854c6de8cf0642cfab2a1bf3f197f0a8fb52bcd38fa176.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd6fb47ee597485162c99978918a6e5bc5fec9f4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/1c254baddeee82d257854c6de8cf0642cfab2a1bf3f197f0a8fb52bcd38fa176.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3a89a04f917ac9a5b4c208d8c88312d233e5263a997cbefc41d7e624d8e3fe0 +size 39671 diff --git a/data/2025/2504_10xxx/2504.10157/images/2e6b8fa3fd5779995f19c798aae290400cdd80c75f2319f40980aedd30481058.jpg b/data/2025/2504_10xxx/2504.10157/images/2e6b8fa3fd5779995f19c798aae290400cdd80c75f2319f40980aedd30481058.jpg new file mode 100644 index 0000000000000000000000000000000000000000..850ce656c7c0c75ba4d6e562584e254006962c37 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/2e6b8fa3fd5779995f19c798aae290400cdd80c75f2319f40980aedd30481058.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09fcc66d93d6e3a56ca163ad9dd3e8090433fcdec2d17137ea0816997d9c94b3 +size 37898 diff --git a/data/2025/2504_10xxx/2504.10157/images/36082be4a11f38a78da16a370c6d153ea25495bc0786e11c4d30ffd3e799783c.jpg b/data/2025/2504_10xxx/2504.10157/images/36082be4a11f38a78da16a370c6d153ea25495bc0786e11c4d30ffd3e799783c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bae4575f527544136558fa1110036a40c8fb740e --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10157/images/36082be4a11f38a78da16a370c6d153ea25495bc0786e11c4d30ffd3e799783c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5068d6fb387fc23a58ed083751437d5494de77e42c5ca70ef82b05f7dc587f6 +size 4743 diff --git a/data/2025/2504_10xxx/2504.10157/images/41b8f7e83330617a874286474fbb177b1c17d9e2e45f3b39659735bbc7457011.jpg b/data/2025/2504_10xxx/2504.10157/images/41b8f7e83330617a874286474fbb177b1c17d9e2e45f3b39659735bbc7457011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8a35b80327c8296872323a55b2a1f3c01abac15 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/41b8f7e83330617a874286474fbb177b1c17d9e2e45f3b39659735bbc7457011.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb12548993d9a6795156c49f6d4e766b90003858e7a2a9ec7f80633e7336ebac +size 18121 diff --git a/data/2025/2504_10xxx/2504.10157/images/577ebeddce61d3ac540945a4740d2e0fe0a1598ad0904fd729d144e3d7a8e1bf.jpg b/data/2025/2504_10xxx/2504.10157/images/577ebeddce61d3ac540945a4740d2e0fe0a1598ad0904fd729d144e3d7a8e1bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..069dbb7851bbb0358bd0506f221184df296997f1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/577ebeddce61d3ac540945a4740d2e0fe0a1598ad0904fd729d144e3d7a8e1bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cc412508e504514d7469a5c9f66962c79f821bfb4088a68caded13183ce9da7 +size 202303 diff --git a/data/2025/2504_10xxx/2504.10157/images/5a4f9f269826f82d19138a4c9b1f45390e15057486c0858c22877f315d993a5d.jpg b/data/2025/2504_10xxx/2504.10157/images/5a4f9f269826f82d19138a4c9b1f45390e15057486c0858c22877f315d993a5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f1aea4e63f34902b30372c8691c3f6323ed5130 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/5a4f9f269826f82d19138a4c9b1f45390e15057486c0858c22877f315d993a5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1dec3626320becada29aa3a8a7388a7954ac9655fc554367578524e7d80e0fc +size 283934 diff --git a/data/2025/2504_10xxx/2504.10157/images/5b6943b2f52d4b323fee259ac4a2f465c671d7812940c7effe8f60763112e09e.jpg b/data/2025/2504_10xxx/2504.10157/images/5b6943b2f52d4b323fee259ac4a2f465c671d7812940c7effe8f60763112e09e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eacada1566f1d7706496ea2a6eed0031873c0a1d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/5b6943b2f52d4b323fee259ac4a2f465c671d7812940c7effe8f60763112e09e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6289e3d8cb4564028b78272a7f5dfa15c5e9dde0988cc85be51885b0a7e4f520 +size 247733 diff --git a/data/2025/2504_10xxx/2504.10157/images/5cd70d2f9aa944b30cd8014a3f27307a2828ab5935fadc5daf426be35f3eedea.jpg b/data/2025/2504_10xxx/2504.10157/images/5cd70d2f9aa944b30cd8014a3f27307a2828ab5935fadc5daf426be35f3eedea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a63d3612fc1892a1e2574133690d4656e5358385 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/5cd70d2f9aa944b30cd8014a3f27307a2828ab5935fadc5daf426be35f3eedea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:858cb7d099bb833a8ac3c8b1738bb8aee1d74fac253bf2eca2679ee187f1c06e +size 12930 diff --git a/data/2025/2504_10xxx/2504.10157/images/6b41c5eea28ebef44e10cd877369420c1e194556283d8d7959e64dfa68ee1f47.jpg 
b/data/2025/2504_10xxx/2504.10157/images/6b41c5eea28ebef44e10cd877369420c1e194556283d8d7959e64dfa68ee1f47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8056422f001cb6d19962397aa9b60e163953270 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/6b41c5eea28ebef44e10cd877369420c1e194556283d8d7959e64dfa68ee1f47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b85aacc058bf278c82d7cc217b6134efabb6357ec571046f70cf28612d40800 +size 232098 diff --git a/data/2025/2504_10xxx/2504.10157/images/70d85cebae8909029afc6c6fbdf59529d8448c63ca93e6fe9200e65826808979.jpg b/data/2025/2504_10xxx/2504.10157/images/70d85cebae8909029afc6c6fbdf59529d8448c63ca93e6fe9200e65826808979.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63c72ff8875184fc326aa853e8ac326682b81400 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/70d85cebae8909029afc6c6fbdf59529d8448c63ca93e6fe9200e65826808979.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d6ea684255fd209306094f857118c03bded6146ecaab33377d5ce27b8e8864f +size 32871 diff --git a/data/2025/2504_10xxx/2504.10157/images/7904c50696c6c4b3ef575c985f044bc7eab44f178d18b3c0d1f745b398df9e69.jpg b/data/2025/2504_10xxx/2504.10157/images/7904c50696c6c4b3ef575c985f044bc7eab44f178d18b3c0d1f745b398df9e69.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c51eda120f988e002b934d06359bcf1e081ca5f2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/7904c50696c6c4b3ef575c985f044bc7eab44f178d18b3c0d1f745b398df9e69.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9738de4321a356845241c32cd47a18aa4d9f765fb666eec1c854c54bafe5d036 +size 4121 diff --git a/data/2025/2504_10xxx/2504.10157/images/7ab3b60a9c281cf0675ca7cbaa1ac451358842d500a4f62bb1f322d99ada9ba4.jpg b/data/2025/2504_10xxx/2504.10157/images/7ab3b60a9c281cf0675ca7cbaa1ac451358842d500a4f62bb1f322d99ada9ba4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a02c0f017a4a53684fc7d502bd3707b90f31a38 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/7ab3b60a9c281cf0675ca7cbaa1ac451358842d500a4f62bb1f322d99ada9ba4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:538689e8cfed429ddb58869ecbf6ad25469f29ef4c7fd47a826383538b053573 +size 15097 diff --git a/data/2025/2504_10xxx/2504.10157/images/7c9a05886551ed993c5c977d179fdfeb24745f20e17ac999b1da310b35dbc695.jpg b/data/2025/2504_10xxx/2504.10157/images/7c9a05886551ed993c5c977d179fdfeb24745f20e17ac999b1da310b35dbc695.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4a663803183babe187f26632dc0378172de3b6b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/7c9a05886551ed993c5c977d179fdfeb24745f20e17ac999b1da310b35dbc695.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26f94847f1df3f059fa54c048fd7a8cef86246584e831b7659f032c294648e9 +size 9193 diff --git a/data/2025/2504_10xxx/2504.10157/images/7cb07d45660942f4481e7ecfce1c3d40f9a6ce0877b27cea63d127d5b346085f.jpg b/data/2025/2504_10xxx/2504.10157/images/7cb07d45660942f4481e7ecfce1c3d40f9a6ce0877b27cea63d127d5b346085f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d37e66eb5b8e70472001d703a81bcaf272981bd1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/7cb07d45660942f4481e7ecfce1c3d40f9a6ce0877b27cea63d127d5b346085f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:161926b5a41698b077ae2d9289ed24f71c101a18143ba49ff83060fc9d5eae53 +size 33696 diff --git a/data/2025/2504_10xxx/2504.10157/images/7ce59a581d9686e1bdc3056be8b7786b1ee7c8b56671e62f94b9629946ea1c5e.jpg b/data/2025/2504_10xxx/2504.10157/images/7ce59a581d9686e1bdc3056be8b7786b1ee7c8b56671e62f94b9629946ea1c5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4b1255bbff9f258a0e5e1d6bc95e398a4b6998b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/7ce59a581d9686e1bdc3056be8b7786b1ee7c8b56671e62f94b9629946ea1c5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dac2be1d15565c1ffb367164e416840deeac457a573bc48964bebb87080766ec +size 115977 diff --git a/data/2025/2504_10xxx/2504.10157/images/8d8724c8324cb94a67c34cbf4205324ec05b3f0fe6a1795102f63c1f6d883257.jpg b/data/2025/2504_10xxx/2504.10157/images/8d8724c8324cb94a67c34cbf4205324ec05b3f0fe6a1795102f63c1f6d883257.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ab1ec5d73d7ec3762da731e023dc432339faad6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/8d8724c8324cb94a67c34cbf4205324ec05b3f0fe6a1795102f63c1f6d883257.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d28bba9964b5fd2accbb5e2ffda817d6f01fa5335385dfc03c03ffdd4e6b44e +size 186867 diff --git a/data/2025/2504_10xxx/2504.10157/images/8e14d1c2825d1d0816a823a907c78e20f63f7b4e274ee343a9155a827632e1a0.jpg b/data/2025/2504_10xxx/2504.10157/images/8e14d1c2825d1d0816a823a907c78e20f63f7b4e274ee343a9155a827632e1a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f169e409596439e3423b8a9e39074f33e5b49cc1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/8e14d1c2825d1d0816a823a907c78e20f63f7b4e274ee343a9155a827632e1a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5954662c420e24264a2f294508c63857140c6278c28bf46b692836ee96ed141 +size 269594 diff --git a/data/2025/2504_10xxx/2504.10157/images/91ad3cd680ab7c20191622101061881bd683e063e4f7da28fa9d5ce86b319768.jpg b/data/2025/2504_10xxx/2504.10157/images/91ad3cd680ab7c20191622101061881bd683e063e4f7da28fa9d5ce86b319768.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a762e25311169cf7aeb3d82a74f0998d2c66453 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/91ad3cd680ab7c20191622101061881bd683e063e4f7da28fa9d5ce86b319768.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2999964a6943f0e60bf533950c870b2c376580650f0ddcff2ebeaef46f070789 +size 267721 diff --git a/data/2025/2504_10xxx/2504.10157/images/95378075e345f87be207b28094b42ea42f66abffcb8dd1545b7c107a69d3e97d.jpg b/data/2025/2504_10xxx/2504.10157/images/95378075e345f87be207b28094b42ea42f66abffcb8dd1545b7c107a69d3e97d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98c268b6e5103b31ae3ca86f4de4ff960b7bc8ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/95378075e345f87be207b28094b42ea42f66abffcb8dd1545b7c107a69d3e97d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1464dddbde0fbf6b9f9b42fb6543a052f92b077f3b843b4eb5e52dad708188af +size 28892 diff --git a/data/2025/2504_10xxx/2504.10157/images/9839e0e42e838abe4b61415c929efb4951f849533401bc247dd8cf0d2900ba80.jpg b/data/2025/2504_10xxx/2504.10157/images/9839e0e42e838abe4b61415c929efb4951f849533401bc247dd8cf0d2900ba80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db70fa797387ba5691827badcd9afa23c6b3003c --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10157/images/9839e0e42e838abe4b61415c929efb4951f849533401bc247dd8cf0d2900ba80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53dd3ae075761916df7b20e67f35f198acd23ae841cf144ee6db9735845aea6e +size 14561 diff --git a/data/2025/2504_10xxx/2504.10157/images/9971298eafb0097555473200ebe95969169d99fbf1d407eea813867188612681.jpg b/data/2025/2504_10xxx/2504.10157/images/9971298eafb0097555473200ebe95969169d99fbf1d407eea813867188612681.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8f4bd3249cfba4309d3f44008311d65a1794923 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/9971298eafb0097555473200ebe95969169d99fbf1d407eea813867188612681.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9908147701823e8cc752b21234aee771f1a3b878732bbe3cb71ddda084fc5641 +size 43516 diff --git a/data/2025/2504_10xxx/2504.10157/images/9ab297241e7b5ad5640f2f7c89ea694c7a2b607437aa0355f16e8a8ee7812b35.jpg b/data/2025/2504_10xxx/2504.10157/images/9ab297241e7b5ad5640f2f7c89ea694c7a2b607437aa0355f16e8a8ee7812b35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b814655f6618aa4def4ddee261e2bf3eaf0d489 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/9ab297241e7b5ad5640f2f7c89ea694c7a2b607437aa0355f16e8a8ee7812b35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2def6d3a86de65d73f779f20e568d7f47c74cd9f0c10964c0b2e4e4a71516416 +size 18334 diff --git a/data/2025/2504_10xxx/2504.10157/images/a66412febde24e12e177a5c22dc9307635f04919238d13f32d8145fd1596adf3.jpg b/data/2025/2504_10xxx/2504.10157/images/a66412febde24e12e177a5c22dc9307635f04919238d13f32d8145fd1596adf3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2c92e908c9da020b09309ff3f576de58ec94211 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/a66412febde24e12e177a5c22dc9307635f04919238d13f32d8145fd1596adf3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b6f55774271a94ab75d3aa2c14c67e92d92218f985ce72cda5c985ad526362 +size 216955 diff --git a/data/2025/2504_10xxx/2504.10157/images/af88ad03ac5ce1c47c53dd172206932141236f13f7154e620648f8b4af53663e.jpg b/data/2025/2504_10xxx/2504.10157/images/af88ad03ac5ce1c47c53dd172206932141236f13f7154e620648f8b4af53663e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1eb48cc3f03094b86807ac5c0c2f19afcfd3cda9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/af88ad03ac5ce1c47c53dd172206932141236f13f7154e620648f8b4af53663e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dd5d336e3f27304deb008747e867a0c24ee95e4d37f608d678310e54a618da1 +size 120679 diff --git a/data/2025/2504_10xxx/2504.10157/images/afe8fd5e150abb10daff5d2cd9105ca719a3f43016065f6085c46dd68212ec23.jpg b/data/2025/2504_10xxx/2504.10157/images/afe8fd5e150abb10daff5d2cd9105ca719a3f43016065f6085c46dd68212ec23.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2a15208c0960b393131b20907379aba865270d6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/afe8fd5e150abb10daff5d2cd9105ca719a3f43016065f6085c46dd68212ec23.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86a3ccd0b3d4f1c773bd8e137db688d7de17827bbbbf95b2e56c7fb9b18bec06 +size 82125 diff --git a/data/2025/2504_10xxx/2504.10157/images/b3698fd8a7cc0afbf47a6e31691ed9fd5974990404ef71422f83c8297bb7cea7.jpg 
b/data/2025/2504_10xxx/2504.10157/images/b3698fd8a7cc0afbf47a6e31691ed9fd5974990404ef71422f83c8297bb7cea7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93528f6c34b15fe62eba228b0231bebafe34e82b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/b3698fd8a7cc0afbf47a6e31691ed9fd5974990404ef71422f83c8297bb7cea7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b7c8f51d081439c8f89e1533ff0a48fae9ae63f792a63d34eb7d5571ba4b028 +size 39107 diff --git a/data/2025/2504_10xxx/2504.10157/images/b549f4ea6be4bd3460a79b7004922964d2d10d6fea789b0c2253d82729469c6e.jpg b/data/2025/2504_10xxx/2504.10157/images/b549f4ea6be4bd3460a79b7004922964d2d10d6fea789b0c2253d82729469c6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73631e8769c3262565cdc5621ea9c4f3e8d6c91b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/b549f4ea6be4bd3460a79b7004922964d2d10d6fea789b0c2253d82729469c6e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8f13994204268a9220d623d5314c02fb2fe35d898d5efa91b68cafaf334fce8 +size 187575 diff --git a/data/2025/2504_10xxx/2504.10157/images/c20d35dd2c26c2ee2cdb820d4f671e70e3eae3a204e23a512d96c6000ab32e42.jpg b/data/2025/2504_10xxx/2504.10157/images/c20d35dd2c26c2ee2cdb820d4f671e70e3eae3a204e23a512d96c6000ab32e42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68b2c36722e016bcc65a926ba7b70dff2bc6173b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/c20d35dd2c26c2ee2cdb820d4f671e70e3eae3a204e23a512d96c6000ab32e42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dd1703afeb5a6dca9e6916fe8e64b80e77ca8f29f603d6ee6b32a927018f5fb +size 231552 diff --git a/data/2025/2504_10xxx/2504.10157/images/c455658df89756889d84385989fcf4d04f7baa808ab9d0760fe1501340dd9f50.jpg b/data/2025/2504_10xxx/2504.10157/images/c455658df89756889d84385989fcf4d04f7baa808ab9d0760fe1501340dd9f50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ce13afefd8d76dedbe487432f115ad6d98d70bb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/c455658df89756889d84385989fcf4d04f7baa808ab9d0760fe1501340dd9f50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d88021c0b88ef58c4f4710e78c54d1d9d558b95de78fb7d351ec0b2c8df4628 +size 5939 diff --git a/data/2025/2504_10xxx/2504.10157/images/c8315fc00f5be85637036238e2bbe85316d8d30a637370c74716c310a2613d7d.jpg b/data/2025/2504_10xxx/2504.10157/images/c8315fc00f5be85637036238e2bbe85316d8d30a637370c74716c310a2613d7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c3eb348e17556efbc19db9087cc33c25f9634e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/c8315fc00f5be85637036238e2bbe85316d8d30a637370c74716c310a2613d7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58bb665710155d4706404effa2c01d7fd3544c91fd420581f1d15afbd75c3dda +size 66168 diff --git a/data/2025/2504_10xxx/2504.10157/images/c89d917b41e40f4b606b5a27431a400adae43e74cb416cbd6208d68554aee7ab.jpg b/data/2025/2504_10xxx/2504.10157/images/c89d917b41e40f4b606b5a27431a400adae43e74cb416cbd6208d68554aee7ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d983770c764c5f6fe8e01f0d804aeba6e7aa8362 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/c89d917b41e40f4b606b5a27431a400adae43e74cb416cbd6208d68554aee7ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5f225dd497e4c5079eb3a1155f86a8f34132a61619bccc8f64af9c5f995ca605 +size 4058 diff --git a/data/2025/2504_10xxx/2504.10157/images/cb26ecd99c58640d8eb4effc287a4e8bee0c4214555edb59f3d149882199fb82.jpg b/data/2025/2504_10xxx/2504.10157/images/cb26ecd99c58640d8eb4effc287a4e8bee0c4214555edb59f3d149882199fb82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34132c50711c99650c1be22eed09aa4f90724f49 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/cb26ecd99c58640d8eb4effc287a4e8bee0c4214555edb59f3d149882199fb82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3eb2f04c62f25e87422ffdbfee37692b7bb54f9ae0b551ebed28d8633f0c466f +size 19407 diff --git a/data/2025/2504_10xxx/2504.10157/images/ee37a034cc7bfd54903875bca0d8ebdb66151851f1d8c1aae3fe11feeefe474d.jpg b/data/2025/2504_10xxx/2504.10157/images/ee37a034cc7bfd54903875bca0d8ebdb66151851f1d8c1aae3fe11feeefe474d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6acec4a8febaee48dec83a05df09cad7222bc096 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/ee37a034cc7bfd54903875bca0d8ebdb66151851f1d8c1aae3fe11feeefe474d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6c6d229ff88d42a4f0f113c40a10bde93437b04d6b3308017086b2e2e014be8 +size 113002 diff --git a/data/2025/2504_10xxx/2504.10157/images/f30fe370c7d43ea99207a6f091595cf5559b34c28ba881f238d60524e7804d01.jpg b/data/2025/2504_10xxx/2504.10157/images/f30fe370c7d43ea99207a6f091595cf5559b34c28ba881f238d60524e7804d01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9187afffae3d4aed2d7543cb1cb74e5f337b7866 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/f30fe370c7d43ea99207a6f091595cf5559b34c28ba881f238d60524e7804d01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9f9545b5c3bed8fcc14d8871944a33e71af15bed22c162e70ac8aad264d1275 +size 41886 diff --git a/data/2025/2504_10xxx/2504.10157/images/ff781687759cc0943011e2da7ef1096cc2baed0a7ee1909c62cdf62fa3b74e60.jpg b/data/2025/2504_10xxx/2504.10157/images/ff781687759cc0943011e2da7ef1096cc2baed0a7ee1909c62cdf62fa3b74e60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35da2a4bb7b346dbb489bba62ddf358afbded800 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/images/ff781687759cc0943011e2da7ef1096cc2baed0a7ee1909c62cdf62fa3b74e60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7f0da4719e501b2deab90a79d43c0edd36a7c8e7ec9dde952a826e24ee1c7e9 +size 6226 diff --git a/data/2025/2504_10xxx/2504.10157/layout.json b/data/2025/2504_10xxx/2504.10157/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b3c75e6ccaa4039e2697f2256dfa157c4768d207 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10157/layout.json @@ -0,0 +1,11165 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 120, + 97, + 491, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 97, + 491, + 155 + ], + "spans": [ + { + "bbox": [ + 120, + 97, + 491, + 155 + ], + "type": "text", + "content": "SocioVerse: A World Model for Social Simulation Powered by LLM Agents and A Pool of 10 Million Real-World Users" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": "Xinnong Zhang" + }, + { + "bbox": [ + 114, + 
194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{1,2\\dagger}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Jiayu Lin" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{1,2\\dagger}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Xinyi Mou" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Shiyue Yang" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Xiawei Liu" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Libo Sun" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Hanjia Lyu" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Yihang Yang" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Weihong Qi" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Yue Chen" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Guanying Li" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Ling Yan" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Yao Hu" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Siming Chen" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Yu Wang" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Xuanjing Huang" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Jiebo Luo" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Shiping Tang" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 
243 + ], + "type": "text", + "content": ", Libo Wu" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Baohua Zhou" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "text", + "content": ", Zhongyu Wei" + }, + { + "bbox": [ + 114, + 194, + 499, + 243 + ], + "type": "inline_equation", + "content": "^{1,2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "spans": [ + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "text", + "content": "Shanghai Innovation Institute, " + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "text", + "content": "Fudan University, " + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "text", + "content": "University of Rochester, " + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "text", + "content": "Indiana University, " + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 173, + 242, + 436, + 287 + ], + "type": "text", + "content": "Xiaohongshu Inc. zywei@fudan.edu.cn \nSocioVerse: https://github.com/FudanDISC/SocioVerse" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 112, + 300, + 491, + 497 + ], + "blocks": [ + { + "bbox": [ + 112, + 300, + 491, + 497 + ], + "lines": [ + { + "bbox": [ + 112, + 300, + 491, + 497 + ], + "spans": [ + { + "bbox": [ + 112, + 300, + 491, + 497 + ], + "type": "image", + "image_path": "ee37a034cc7bfd54903875bca0d8ebdb66151851f1d8c1aae3fe11feeefe474d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 499, + 504, + 522 + ], + "lines": [ + { + "bbox": [ + 104, + 499, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 499, + 504, + 522 + ], + "type": "text", + "content": "Figure 1: An illustration of the SocioVerse in the case of Ukraine issue. The alignment challenges are well handled regarding environment, user, scenario, and behavior." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 534, + 329, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 534, + 329, + 547 + ], + "spans": [ + { + "bbox": [ + 281, + 534, + 329, + 547 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 558, + 470, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 558, + 470, + 702 + ], + "spans": [ + { + "bbox": [ + 140, + 558, + 470, + 702 + ], + "type": "text", + "content": "Social simulation is transforming traditional social science research by modeling human behavior through interactions between virtual individuals and their environments. 
With recent advances in large language models (LLMs), this approach has shown growing potential in capturing individual differences and predicting group behaviors. However, existing methods face alignment challenges related to the environment, target users, interaction mechanisms, and behavioral patterns. To this end, we introduce SocioVerse, an LLM-agent-driven world model for social simulation. Our framework features four powerful alignment components and a user pool of 10 million real individuals. To validate its effectiveness, we conducted large-scale simulation experiments across three distinct domains: politics, news, and economics. Results demonstrate that SocioVerse can reflect large-scale population dynamics while ensuring diversity, credibility, and representativeness through standardized procedures and minimal manual adjustments." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 224, + 35, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 35, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 35, + 567 + ], + "type": "text", + "content": "arXiv:2504.10157v3 [cs.CL] 15 Jul 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 710, + 288, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 288, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 288, + 722 + ], + "type": "text", + "content": "These authors contribute equally to this work." + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 105, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 506, + 160 + ], + "type": "text", + "content": "The study of human behavior aims to understand how individuals and groups act in various social contexts and serves as a cornerstone of social science research. Traditionally, this has been accomplished using methods such as surveys, interviews, and observations [10, 18, 44]. However, these approaches often encounter challenges, including high costs, limited sample sizes, and ethical concerns. As a result, researchers have resorted to alternative methods for studying human behavior." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 164, + 504, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 504, + 253 + ], + "type": "text", + "content": "Social simulation has emerged as an effective method for addressing this issue, where researchers use agents to model human behavior, observe their reactions, and translate these findings into insights about human behavior [48, 50]. By assigning behavioral rules to autonomous agents, researchers can explore how micro-level decisions lead to emergent macro-level patterns through the agent-based models [11, 21]. This approach enables capturing specific groups' preferences on particular topics and forecasting potential social dynamics. 
Furthermore, recent advancements in large language models (LLMs) have significantly enhanced agents' reasoning and decision-making capabilities, enabling them to operate and interact within increasingly realistic and complex environments [3, 35, 37]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 258, + 506, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 313 + ], + "type": "text", + "content": "Recent studies have explored social simulation across various levels and scenarios, from mimicking well-known individuals and mirroring specific situations to modeling large-scale social dynamics [4, 29, 34, 36, 49, 60]. However, they share a common challenge: alignment between the simulated environment and the real world, which manifests across multiple dimensions and raises several key questions that remain to be addressed, as shown in Figure 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 318, + 385, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 385, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 385, + 328 + ], + "type": "text", + "content": "Q1. How to align the simulated environment with the real world?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 329, + 506, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 406 + ], + "type": "text", + "content": "In the real world, new events occur every day and new content is continuously generated. The behavior of real users is rooted in these ever-evolving social contexts and policy agendas. However, the static knowledge of LLMs prevents them from aligning with the dynamic nature of the real-world social environment [2, 15]. There is a gap between the simulated context and the real world, which results in discrepancies between the simulation process and outcomes compared to those in reality. Therefore, it is necessary to establish an update mechanism to keep the simulated environment synchronized with the real world." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 411, + 373, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 411, + 373, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 373, + 422 + ], + "type": "text", + "content": "Q2. How to align simulated agents with target users precisely?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 422, + 504, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 487 + ], + "type": "text", + "content": "The composition of users in the real world is both complex and diverse, making it impractical to enumerate all users in every scenario. Therefore, it is essential to identify target users whose distribution aligns with that of the users in the corresponding scenario, thereby accurately reflecting the real-world composition and relationships [17, 45]. Based on this, precise target user simulation also requires providing agents with a detailed and comprehensive description of the corresponding users, often involving the integration of high-fidelity demographic, contextual, and behavioral data." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 492, + 496, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 496, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 496, + 503 + ], + "type": "text", + "content": "Q3. How to align the interaction mechanism with the real world among different scenarios?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 503, + 506, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 506, + 580 + ], + "type": "text", + "content": "The diversity of social interactions presents challenges in social simulation design, requiring deliberate choices regarding the number of individuals, social structures, interaction patterns, and message dissemination mechanisms, to align with the real world. This often results in independently constructed task-specific simulation pipelines performing repetitive work, which reduces their generalizability and scalability [26, 58]. Therefore, there is a need for unified simulation frameworks based on systematic categorization to standardize simulation components and facilitate extensibility across different social scenarios." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 585, + 399, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 585, + 399, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 399, + 597 + ], + "type": "text", + "content": "Q4. How to align the behavioral pattern with the real-world groups?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 596, + 505, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 505, + 662 + ], + "type": "text", + "content": "When the environment perceived by agents, the user composition, and the interaction mechanisms are aligned with the real world, agents are expected to exhibit responses consistent with those of the corresponding real users. However, current LLMs exhibit inherent bias and limitations in such reasoning, failing to infer different types of user behaviors [16, 60]. Therefore, it is necessary to systematically collect behavior-driving factors across different user characteristics and adopt appropriate modeling approaches to effectively capture diverse behavior patterns." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "content": "In this paper, we propose SocioVerse, a world model for social simulation driven by LLM-based agents based on a large-scale real-world user pool. As shown in Figure 2, we design modular components to address the above questions. The Social Environment injects up-to-date and external real-world information into the simulation. 
The User Engine and Scenario Engine respectively reconstruct realistic user context and orchestrate the simulation process to align the simulation with" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 77, + 495, + 316 + ], + "blocks": [ + { + "bbox": [ + 112, + 77, + 495, + 316 + ], + "lines": [ + { + "bbox": [ + 112, + 77, + 495, + 316 + ], + "spans": [ + { + "bbox": [ + 112, + 77, + 495, + 316 + ], + "type": "image", + "image_path": "af88ad03ac5ce1c47c53dd172206932141236f13f7154e620648f8b4af53663e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 326, + 504, + 371 + ], + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 371 + ], + "type": "text", + "content": "Figure 2: An illustration of SocioVerse framework involving 4 powerful parts. The social environment provides an updated context for the simulation. During the simulation, the behavior engine takes the simulation setting, user profiles, and social information from the scenario engine, user engine, and social environment, respectively, and generates the results according to the query." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 427 + ], + "type": "text", + "content": "the real world. Given this rich contextual setup, the Behavior Engine then drives agents to reproduce human behaviors accordingly." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 430, + 504, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 504, + 487 + ], + "type": "text", + "content": "To support the framework, we construct a user pool of 10 million individuals by collecting real-world social media data to power the user engine. Comparable in scale to the entire populations of Hungary or Greece, this extensive pool enables diverse and large-scale social simulations. For any customized simulation task, various sampling strategies can be applied to extract target user groups from the pool to support the simulation process." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 491, + 506, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 557 + ], + "type": "text", + "content": "We conduct three simulations using the SocioVerse framework, each differing in research domain, user composition, and social environment: (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey. For each task, we compare the simulation results with real-world situations. Extensive and comprehensive experiments demonstrate that our framework serves as a robust foundation for building standardized and accurate large-scale social simulations. 
In summary, our key contributions are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 576, + 506, + 706 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 132, + 576, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 576, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 132, + 576, + 504, + 609 + ], + "type": "text", + "content": "- SocioVerse: We propose a world model for social simulation comprising four powerful alignment modules, enabling diverse and trustworthy social simulations (as illustrated in Figure 2)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 624, + 506, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 624, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 132, + 624, + 506, + 658 + ], + "type": "text", + "content": "- 10M User Pool: A user pool of 10 million individuals, constructed from real-world behavioral data, enables large-scale and diverse social simulations, ranging from small interest groups to large citizen communities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 672, + 504, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 672, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 132, + 672, + 504, + 706 + ], + "type": "text", + "content": "- Three Illustrative Simulations: We demonstrate the framework's capabilities through three distinct scenarios: presidential election prediction, breaking news feedback, and a national economic survey, providing a foundation for future research." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 171, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 171, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 171, + 83 + ], + "type": "text", + "content": "2 Methods" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 96, + 194, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 96, + 194, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 96, + 194, + 107 + ], + "type": "text", + "content": "Overall Framework" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 506, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 506, + 174 + ], + "type": "text", + "content": "The SocioVerse framework follows a structured pipeline to achieve realistic social simulation results, as shown in Figure 2: (1) Social Environment collects updated information and contextual knowledge. Within the simulation environment, (2) User Engine aligns the simulated agents with target users, (3) Scenario Engine aligns the interaction structure with diverse scenarios, and (4) Behavior Engine aligns the behavioral pattern with real-world target groups." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 188, + 215, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 188, + 215, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 188, + 215, + 198 + ], + "type": "text", + "content": "2.1 Social Environment" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 209, + 506, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 243 + ], + "type": "text", + "content": "Function The social environment provides event-related context to align the simulation environment with real-world conditions. By integrating up-to-date events, social statistics, and preference content into LLM-based agents, it enhances the realism of the simulation and improve agent decision-making." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 289 + ], + "type": "text", + "content": "Components The social environment should encompass as much real-world social, cultural, and technological context as possible. It can be broadly categorized into three types: social structural information, social dynamic information, and personalized context." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 293, + 505, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 505, + 361 + ], + "type": "text", + "content": "Social Structure: Social structural information provides agents with a rich knowledge base encompassing demographic distributions, cultural norms, urban infrastructures, and collective behavior patterns [57]. This data allows agents to behave in a way that aligns with the typical characteristics of their assigned demographic or geographic profile. For example, by incorporating regional dialect preferences, work-life habits, and common social values, the simulation can more accurately reflect public discourse trends, mobility behaviors, and economic interactions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 365, + 504, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 421 + ], + "type": "text", + "content": "Social Dynamics: Social dynamics encompass time-sensitive content continuously generated in the real world, such as news events and policy changes. Typically, this engine maintains an up-to-date event base to continuously collect real-world event news from mainstream news, and all the news articles contain time stamps and event-related tags so that LLM-based agents can comb through the timeline of the events and react accordingly [37]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 425, + 505, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 505, + 480 + ], + "type": "text", + "content": "Personalized Context: In addition to the macro social environment, individuals also receive different personalized information feeds. Previous studies have explored that the recommendation system can enhance the behavior diversity of the agent [31, 56, 60]. 
Consequently, the preference content component constructs relevant posts and pushes them to agents according to their social interaction network and interesting topics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 495, + 183, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 183, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 183, + 506 + ], + "type": "text", + "content": "2.2 User Engine" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 515, + 506, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 506, + 539 + ], + "type": "text", + "content": "Function The user engine aligns simulated agents with a rich set of real-world user samples, enabling the creation of complex target users within the simulation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 552, + 504, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 574 + ], + "type": "text", + "content": "Components To support diverse user composition and effective user retrieval and description, the user engine incorporates a large user pool and a wide range of user labels." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": "User Pools: The user pool is designed to collect extensive digital footprints of individuals across social media platforms, enabling a more comprehensive characterization of real-world behavioral patterns and expression tendencies. To this end, we constructed a user pool covering a variety of social media platforms, including " + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^1" + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": " and Rednote2. Anomalous data, such as advertising and bot-generated content, is filtered by calculating the post frequency and average text similarity. The detailed procedure can be found in Appendix A. We index users and construct a user pool of 10 million users based on the collected social media posts. 
Formally, we define user pool as: " + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "inline_equation", + "content": "UserPool = \\{U_i, P_i \\mid i \\in \\mathbb{S}\\}" + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": ", where the " + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": "-th user " + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "inline_equation", + "content": "U_i" + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": " derives from the collection of social media platforms " + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "inline_equation", + "content": "\\mathbb{S}" + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": " with his/her related posts " + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "inline_equation", + "content": "P_i = \\{P_{i,1}, P_{i,2}, \\ldots\\}" + }, + { + "bbox": [ + 104, + 578, + 506, + 689 + ], + "type": "text", + "content": ". The statistical summary of the user pool is provided in Table 1." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 700, + 190, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 700, + 190, + 711 + ], + "spans": [ + { + "bbox": [ + 118, + 700, + 190, + 711 + ], + "type": "text", + "content": "1https://x.com/" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 711, + 256, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 711, + 256, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 711, + 256, + 722 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 118, + 711, + 256, + 722 + ], + "type": "text", + "content": "https://www.xiaohongshu.com/" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 226, + 70, + 383, + 121 + ], + "blocks": [ + { + "bbox": [ + 226, + 70, + 383, + 121 + ], + "lines": [ + { + "bbox": [ + 226, + 70, + 383, + 121 + ], + "spans": [ + { + "bbox": [ + 226, + 70, + 383, + 121 + ], + "type": "table", + "html": "
<table><tr><td>Source</td><td># Users</td><td># Posts</td></tr>
<tr><td>X</td><td>1,006,517</td><td>30,195,510</td></tr>
<tr><td>Rednote</td><td>9,158,404</td><td>40,963,735</td></tr></table>
", + "image_path": "7ab3b60a9c281cf0675ca7cbaa1ac451358842d500a4f62bb1f322d99ada9ba4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 201, + 125, + 407, + 137 + ], + "lines": [ + { + "bbox": [ + 201, + 125, + 407, + 137 + ], + "spans": [ + { + "bbox": [ + 201, + 125, + 407, + 137 + ], + "type": "text", + "content": "Table 1: Statistical summary of the 10M user pool." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 152, + 506, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 152, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 152, + 506, + 283 + ], + "type": "text", + "content": "User Labels: User labels refer to the tagging and description of users, which can be represented using discrete attributes or continuous representation. Demographic descriptions of users are the most commonly used form of labeling. However, they are often not directly accessible. Therefore, we designed a demographic annotation system to infer and label user attributes. The process begins with multiple LLMs serving as initial annotators, classifying users across various demographic dimensions. Human annotators then evaluate and refine the LLM-generated labels, ensuring the reliability of the user tags dataset. The curated dataset is subsequently used to train demographic classifiers, enabling large-scale annotation in a cost-effective manner. Specifically, we annotate users across 15 demographic dimensions: age, gender, vocation, race, income, education, settlement type, region, employment, marital status, religious, party, ideology, BigFive personality, and hobbies. Each attribute is inferred by a specialized classifier trained on the corresponding subset of the user tags dataset. See Appendix B for further details." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 296, + 201, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 296, + 201, + 307 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 201, + 307 + ], + "type": "text", + "content": "2.3 Scenario Engine" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 316, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 350 + ], + "type": "text", + "content": "Function The scenario engine aligns various simulation structures with real-world contexts based on specific task formulations and scenario types, and then scales individual simulations by sampling according to demographic distributions provided by the user engine." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 361, + 505, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 505, + 394 + ], + "type": "text", + "content": "Components The scenario engine formulates a wide range of real-world social situations, which can be summarized as archetypal scenario templates, including questionnaires, in-depth interviews, behavior experiments, and social media interaction." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 504, + 433 + ], + "type": "text", + "content": "Questionnaire: The questionnaire scenario constructs the simulation in a 1-to-N manner, with one designed scale or questionnaire answered by multiple target users in a single round. This scenario is suitable for massive social investigation on specific topics, like election polls." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 437, + 505, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 505, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 505, + 503 + ], + "type": "text", + "content": "Inddepth Interview: The in-depth interview scenario follows a 1-to-1 structure, where a simulated interviewer engages with an individual target user through multiple interaction rounds [43]. This iterative process allows for probing deeper into responses, clarifying ambiguities, and exploring underlying motivations. Such simulations are particularly useful for qualitative research on user experiences, psychological assessments, and exploratory studies where nuanced responses and detailed reasoning are essential." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 508, + 505, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 505, + 574 + ], + "type": "text", + "content": "Behavior Experiment: The behavior experiment scenario is typically conducted in a 1-to-N or N-to-N format, depending on whether individual or group interactions are being studied [8, 42]. Simulated users are exposed to controlled conditions where their behavioral responses are observed across multiple rounds of interaction. These simulations help researchers examine decision-making processes, social influences, and cognitive biases in various experimental setups, such as consumer behavior studies or cooperative game simulations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 578, + 505, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 505, + 645 + ], + "type": "text", + "content": "Social Media Interaction: The social media interaction scenario adopts an N-to-N structure, where multiple simulated users engage in dynamic, multi-round exchanges in an online setting [30]. This scenario captures real-time interactions, including content sharing, comment threads, and viral spread dynamics, allowing researchers to analyze public discourse, opinion shifts, and information diffusion on social platforms. It is particularly valuable for studying trends in misinformation, political discussions, and network-based influence propagation." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 658, + 201, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 201, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 201, + 669 + ], + "type": "text", + "content": "2.4 Behavior Engine" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 505, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 721 + ], + "type": "text", + "content": "Function The behavior engine aims to align the behaviors of the agents with that of real users. The behavior engine integrates user history and experience from the user engine, the interaction mechanism from the scenario engine and social context from the social environment to predict the behavior of each individual." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "Components To achieve credible behavior simulation, the behavior engine needs to provide a robust simulation foundation, including traditional agent-based models and a series of LLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 100, + 504, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 504, + 166 + ], + "type": "text", + "content": "Traditional Agent-Based Modeling: Traditional agent-based modeling (ABM) relies on rule-based and mathematical models [9, 23, 32, 47, 52], where interactions among agents are typically realized through the broadcasting of predefined values. These values are derived from heuristic functions or theoretical mathematical formulations. Traditional ABM approaches are highly scalable and computationally efficient, making them well-suited for simulating large populations, especially marginal users with relatively limited influence." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 171, + 506, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 258 + ], + "type": "text", + "content": "LLM-powered Agents: LLMs leverage their role-playing capabilities to simulate user-generated content, and the abilities can be activated through various methods [29, 36, 51, 61-64]. Specifically, the behavior engine can be powered by general LLMs, expert LLMs, and domain-specific LLMs. Through non-parametric prompting, powerful general LLMs (e.g., GPT series and Qwen series) can act in accordance with predefined user profiles. Expert and domain-specific LLMs are acquired through parametric training, e.g., continual pretraining, supervised fine-tuning, and reinforcement learning. 
When target users exhibit complex profiles and the simulation requires deep domain expertise, these models are leveraged to enhance the professionalism and accuracy of agent behaviors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 275, + 315, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 315, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 315, + 289 + ], + "type": "text", + "content": "3 Implementation of Specific Scenarios" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 300, + 504, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 356 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 356 + ], + "type": "text", + "content": "We implement three representative social simulation scenarios through the SocioVerse framework based on the implemented components: (a) presidential election prediction of America, (b) breaking news feedback analysis, and (c) national economic survey of China. These scenarios respectively address political communication, journalistic dissemination, and socioeconomic domains, demonstrating the framework's generalizability through standardized implementation pipelines." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 111, + 367, + 239, + 460 + ], + "blocks": [ + { + "bbox": [ + 111, + 367, + 239, + 460 + ], + "lines": [ + { + "bbox": [ + 111, + 367, + 239, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 367, + 239, + 460 + ], + "type": "image", + "image_path": "41b8f7e83330617a874286474fbb177b1c17d9e2e45f3b39659735bbc7457011.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 467, + 504, + 490 + ], + "lines": [ + { + "bbox": [ + 104, + 467, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 504, + 490 + ], + "type": "text", + "content": "Figure 3: Illustration of three scenarios representing (a) presidential election prediction, (b) breaking news feedback, and (c) national economic survey." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 246, + 367, + 372, + 460 + ], + "blocks": [ + { + "bbox": [ + 246, + 367, + 372, + 460 + ], + "lines": [ + { + "bbox": [ + 246, + 367, + 372, + 460 + ], + "spans": [ + { + "bbox": [ + 246, + 367, + 372, + 460 + ], + "type": "image", + "image_path": "9839e0e42e838abe4b61415c929efb4951f849533401bc247dd8cf0d2900ba80.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 380, + 367, + 500, + 460 + ], + "blocks": [ + { + "bbox": [ + 380, + 367, + 500, + 460 + ], + "lines": [ + { + "bbox": [ + 380, + 367, + 500, + 460 + ], + "spans": [ + { + "bbox": [ + 380, + 367, + 500, + 460 + ], + "type": "image", + "image_path": "5cd70d2f9aa944b30cd8014a3f27307a2828ab5935fadc5daf426be35f3eedea.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 511, + 315, + 522 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 511, + 315, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 315, + 522 + ], + "type": "text", + "content": "3.1 Presidential Election Prediction of America" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 532, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 532, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 532, + 504, + 597 + ], + "type": "text", + "content": "Task Description Presidential elections remain central to public engagement and party strategy formation [6, 46]. This study analyzes methods for large-scale election simulation using LLMs through the U.S. presidential system's Electoral College framework. In this indirect voting system, citizens vote for state electors (allocated by congressional representation) who formally elect the president. Most states employ a winner-takes-all allocation of electoral votes to the statewide majority winner, with our modeling focused on predicting these state-level outcomes." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 610, + 504, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 504, + 688 + ], + "type": "text", + "content": "Target Group Distribution Extensive research has documented the influence of demographic factors on election outcomes [33, 53]. We model U.S. demographic and ideological diversity through integrated Census Bureau (2022 voting/registration) and ANES (2020) data [1]. This scenario incorporates 12 attributes from the user engine: socioeconomic (income, education, employment), geographic (region, area), and political (party, ideology) dimensions alongside demographic factors (age, gender, race, marital status, and religious status). Given available marginal distributions, we employ iterative proportional fitting (IPF) to synthesize agent populations, see Appendix C.1." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "Questionnaire Design We design the presidential election questionnaire based on abundant polls conducted by various media and research institutes [5, 24], incorporating both significant issues and" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "voter preferences. These elements are then optimized into proper forms for LLM-based agents by the scenario engine. The entire questionnaire can be found in Appendix D.1." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 106, + 506, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 162 + ], + "type": "text", + "content": "Evaluation Metric Two metrics are used to comprehensively compare the simulated election results to the real-world results. (1) Accuracy rate (Acc) is measured by calculating the proportion of states for which the election simulation results align with the actual result, serving as a coarse-grained evaluation metric. (2) Root Mean Square Error (RMSE) is measured by calculating the simulated vote share and the actual vote share for each state, which serves as a fine-grained evaluation metric." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 174, + 239, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 174, + 239, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 239, + 186 + ], + "type": "text", + "content": "3.2 Breaking News Feedback" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 194, + 506, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 506, + 251 + ], + "type": "text", + "content": "Task Description Journalism plays a crucial role in shaping public perception and opinion through agenda-setting, framing, and information dissemination [20, 55]. Online social media platforms have gradually replaced the influence of traditional paper media. When breaking news is released on social media platforms, its potential audience may hold different stances. We take the release of ChatGPT as our target news to evaluate the accuracy and foreseeability of public attitudes." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "text", + "content": "Target Group Distribution We define all Rednote users in our pool as the universal set, identifying technology-interested users as the potential audience set " + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "inline_equation", + "content": "\\mathbb{P}" + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "text", + "content": ", and those discussing ChatGPT via keyword matching as the ground truth set " + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "inline_equation", + "content": "\\mathbb{G}" + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "inline_equation", + "content": "\\mathbb{G} \\subset \\mathbb{P} \\subset UserPool" + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "text", + "content": ". Context is limited to pre-news timeframes to prevent leakage. Using the potential audience distribution as prior, we sample agents with identical distribution sampling (IDS) as " + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "inline_equation", + "content": "D_{s} = IDS(UserPool, \\mathbb{P})" + }, + { + "bbox": [ + 104, + 260, + 505, + 350 + ], + "type": "text", + "content": ", see Appendix C.2), considering demographics (gender, age, education, and consumption level) during sampling the user pools. Based on this, the task is to compare the consistency between the agents' attitudes toward news and those of the users in the ground truth set." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 358, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 504, + 416 + ], + "type": "text", + "content": "Questionnaire Design We design the cognitive questionnaire using the ABC attitude model (Affect, Behavior, Cognition) [28], which outlines attitude formation as a hierarchy: cognition affects emotions, guiding behavior. Combined with a 5-point Likert scale [22], the questionnaire covers six dimensions: public cognition (PC), perceived risks (PR), perceived benefits (PB), trust (TR), fairness (FA), and public acceptance (PA). See Appendix D.2 for details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 425, + 504, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 504, + 482 + ], + "type": "text", + "content": "Evaluation Metric Agents from both sets answer the questionnaire for paired responses. Two evaluation dimensions assess feedback: (1) Normalized RMSE (NRMSE) measures point-wise differences between simulated and ground truth answers across PC, PR, PB, TR, FA, and PA as value evaluations; (2) KL-divergence (KL-Div) compares the 6-dimensional answer distributions between groups as consistency evaluations." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 493, + 284, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 284, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 284, + 506 + ], + "type": "text", + "content": "3.3 National Economic Survey of China" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 513, + 505, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 505, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 505, + 592 + ], + "type": "text", + "content": "Task Description Economic simulation is another crucial part of massive social simulations as it models resource distribution, market dynamics, and financial behaviors, providing insights into economic stability and policy impacts [13, 54]. By integrating economic factors with social interactions, it enhances the prediction of systemic outcomes, guiding decision-making in areas such as governance, urban planning, and crisis management. We follow a national economic survey conducted by the National Bureau of Statistics of China, which interviews Chinese citizens on their monthly spending given the average salary of each province in China." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 601, + 504, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 657 + ], + "type": "text", + "content": "Target Group Distribution The prior distribution is based on the methodology from the National Bureau of Statistics of China, which takes 160,000 families nationwide and calculates their incomes and spending as the national average statistics [39]. We sample nationwide agents from our user pool proportionally according to their region population and generate their income distribution according to the regional average income [38]. The detailed method can be referred to in Appendix C.3." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 667, + 507, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 507, + 724 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 507, + 724 + ], + "type": "text", + "content": "Questionnaire Design Spending details in China Statistical Yearbook 2024 [40] are categorized into eight parts, i.e. food, clothing, housing, daily necessities & services, communication & transportation, education & entertainment, healthcare, and others. Consequently, the questionnaire design covers the above categories with examples and uses segmented interval options in each question. The entire questionnaire can be referred to in Appendix D.3." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "Evaluation Metric Both value evaluation and distribution evaluation are involved in the national economic survey as well. 
(1) NRMSE of the nine categories is measured between the simulated results and official statistics. (2) KL-Div is measured by taking the 8-item spending as a distribution to evaluate the consistency between the simulation and the real world." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 133, + 164, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 164, + 145 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 164, + 145 + ], + "type": "text", + "content": "4 Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 158, + 446, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 158, + 446, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 446, + 170 + ], + "type": "text", + "content": "4.1 SocioVerse Can Support Diverse and Accurate Massive Social Simulations" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 184, + 504, + 235 + ], + "blocks": [ + { + "bbox": [ + 106, + 184, + 504, + 235 + ], + "lines": [ + { + "bbox": [ + 106, + 184, + 504, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 184, + 504, + 235 + ], + "type": "table", + "html": "
<table><tr><td>Scenario</td><td># Agents</td><td># Demographics</td><td>Type</td><td>Sampling</td><td>Source</td><td>Language</td><td># Questions</td><td>Ground truth</td></tr>
<tr><td>PresElectPredict</td><td>331,836</td><td>12</td><td>label</td><td>IPF</td><td>X</td><td>EN</td><td>49</td><td>real world</td></tr>
<tr><td>BreakNewsFeed</td><td>20,000</td><td>7</td><td>label</td><td>IDS</td><td>rednote</td><td>ZH</td><td>18</td><td>calculated</td></tr>
<tr><td>NatEconSurvey</td><td>16,000</td><td>9</td><td>label+number</td><td>IDS</td><td>rednote</td><td>ZH</td><td>17</td><td>real world</td></tr></table>
", + "image_path": "7cb07d45660942f4481e7ecfce1c3d40f9a6ce0877b27cea63d127d5b346085f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "type": "text", + "content": "Experiment Settings We select powerful LLMs from different model families. For open-sourced models, we select Llama-3-70b-Instruct [14], Qwen2.5-72b-Instruct [59], DeepSeek-R1-671b [19], and DeepSeek-V3 [27]. For commercial models, we select GPT-4o" + }, + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "type": "text", + "content": " [41] and GPT-4o-mini" + }, + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 104, + 287, + 506, + 321 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 325, + 505, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 505, + 403 + ], + "type": "text", + "content": "We compare the settings of all three scenarios for better understanding, which is shown in Table 2. As the Presidential Election Prediction covers a 1-in-1,000 sample of the U.S. population, GPT-4o is excluded from comparison due to cost constraints. In terms of local model serving, Qwen2.5-72b-Instruct and Llama3-70b-Instruct models are both deployed on 8 NVIDIA RTX4090 GPUs via vLLM [25]. We set max tokens to 2,048 for all models to enable chain-of-thoughts during the generation and the temperature is set to 0.7 to encourage diversity. Implementation details for user pool construction and demographics annotation can be found in Appendix A and B." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 106, + 413, + 504, + 520 + ], + "blocks": [ + { + "bbox": [ + 104, + 238, + 504, + 274 + ], + "lines": [ + { + "bbox": [ + 104, + 238, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 238, + 504, + 274 + ], + "type": "text", + "content": "Table 2: Detail settings of three simulation scenarios, where PresElectPredict, BreakNewsFeed, and NatEconSurvey denote three simulations mentioned in the paper, respectively. IPF and IDS denote iterative proportional fitting and identical distribution sampling, see Appendix C." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 413, + 504, + 520 + ], + "lines": [ + { + "bbox": [ + 106, + 413, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 106, + 413, + 504, + 520 + ], + "type": "table", + "html": "
<table><tr><td rowspan="3">Model</td><td colspan="4">PresElectPredict</td><td rowspan="2" colspan="2">BreakNewsFeed</td><td colspan="4">NatEconSurvey</td></tr>
<tr><td colspan="2">Overall</td><td colspan="2">Battleground</td><td colspan="2">Overall</td><td colspan="2">Developed-region</td></tr>
<tr><td>Acc↑</td><td>RMSE↓</td><td>Acc↑</td><td>RMSE↓</td><td>KL-Div↓</td><td>RMSE↓</td><td>KL-Div↓</td><td>RMSE↓</td><td>KL-Div↓</td><td>RMSE↓</td></tr>
<tr><td>Llama3-70b</td><td>0.843</td><td>0.064</td><td>0.733</td><td>0.045</td><td>0.668</td><td>0.199</td><td>0.016</td><td>0.026</td><td>0.013</td><td>0.025</td></tr>
<tr><td>Qwen2.5-72b</td><td>0.922</td><td>0.037</td><td>0.800</td><td>0.031</td><td>0.113</td><td>0.059</td><td>0.066</td><td>0.048</td><td>0.043</td><td>0.039</td></tr>
<tr><td>DeepSeek-R1-671b</td><td>\\</td><td>\\</td><td>0.670</td><td>0.065</td><td>0.383</td><td>0.082</td><td>0.059</td><td>0.045</td><td>0.045</td><td>0.036</td></tr>
<tr><td>DeepSeek-V3</td><td>0.922</td><td>0.046</td><td>0.867</td><td>0.041</td><td>0.263</td><td>0.072</td><td>0.035</td><td>0.036</td><td>0.023</td><td>0.030</td></tr>
<tr><td>GPT-4o-mini</td><td>\\</td><td>\\</td><td>0.800</td><td>0.039</td><td>0.195</td><td>0.114</td><td>0.046</td><td>0.045</td><td>0.030</td><td>0.036</td></tr>
<tr><td>GPT-4o</td><td>\\</td><td>\\</td><td>\\</td><td>\\</td><td>0.196</td><td>0.055</td><td>0.062</td><td>0.051</td><td>0.036</td><td>0.038</td></tr></table>
", + "image_path": "c8315fc00f5be85637036238e2bbe85316d8d30a637370c74716c310a2613d7d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "lines": [ + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": "Table 3: Overall results of the three scenarios, where subset Battleground indicates battleground states in the U.S. in the presidential election and subset Developed-Region indicates top-10 developed regions in China in terms of GDP." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 571, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 504, + 594 + ], + "type": "text", + "content": "Results The overall simulation results of the three scenarios are shown in Table 3. We also report subset results for presidential election prediction and national economic survey." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 602, + 506, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 602, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 132, + 602, + 506, + 691 + ], + "type": "text", + "content": "- Presidential Election Prediction We report the overall results and the battleground states' results separately. The prediction of battleground states is challenging even in the real world and thus becomes the focus during the election process. According to the results, GPT-4o-mini and Qwen2.5-72b show competitive performance both in Acc and RMSE. Typically, according to the winner-takes-all rule, over " + }, + { + "bbox": [ + 132, + 602, + 506, + 691 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 132, + 602, + 506, + 691 + ], + "type": "text", + "content": " state voting results are predicted correctly, which means the simulation achieves a high-precision macroscopic reduction of the real-world election results. After the case study, we find that DeepSeek-R1-671b sometimes falls into overthinking, resulting in less accurate results." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 700, + 205, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 700, + 205, + 711 + ], + "spans": [ + { + "bbox": [ + 118, + 700, + 205, + 711 + ], + "type": "text", + "content": "3gpt-4o-2024-08-06" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 710, + 228, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 228, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 228, + 722 + ], + "type": "text", + "content": "4gpt-4o-mini-2024-07-18" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 72, + 506, + 211 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 132, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 132, + 72, + 504, + 139 + ], + "type": "text", + "content": "- Breaking News Feedback The results measure the overall consistency of each model compared with the real-world users' reactions and attitudes. To this end, the performances of GPT-4o and Qwen2.5-72b are more aligned with real-world perspectives than other models in terms of KL-Div and NRMSE, respectively, and the following detailed analysis will demonstrate that the models consistently capture and accurately predict public trends and opinions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 143, + 506, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 143, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 132, + 143, + 506, + 211 + ], + "type": "text", + "content": "- National Economic Survey We report the overall results and results for the top 10 regions by GDP (i.e., developed regions) separately. Generally, all the models closely align with real-world statistics. Llama3-70b shows a significant superiority over other models in the economic survey scenario and all the models perform better in the 1st-Region subset than overall. The results demonstrate that individuals' spending habits can be accurately reproduced under the SocioVerse framework, especially in developed regions." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 218, + 506, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 218, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 506, + 275 + ], + "type": "text", + "content": "The overall results from both value evaluation and distribution evaluation of three simulations sufficiently prove that SocioVerse can support diverse and accurate massive social simulations with a standard pipeline and minimal changes with human experts in the loop. However, the choice of underlying LLMs can affect simulation precision across different scenarios, highlighting the need for further study." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 287, + 493, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 493, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 493, + 312 + ], + "type": "text", + "content": "4.2 Prior Distribution and Real-World Knowledge Can Enhance Simulation Accuracy in Presidential Election Predictions" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 206, + 323, + 405, + 454 + ], + "blocks": [ + { + "bbox": [ + 206, + 323, + 405, + 454 + ], + "lines": [ + { + "bbox": [ + 206, + 323, + 405, + 454 + ], + "spans": [ + { + "bbox": [ + 206, + 323, + 405, + 454 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Acc↑</td><td>RMSE↓</td></tr>
<tr><td>Llama3-70b</td><td>0.733</td><td>0.045</td></tr>
<tr><td>- w/o Knowledge</td><td>0.533</td><td>0.051</td></tr>
<tr><td>- w/o Knowledge & Prior Distribution</td><td>0.600</td><td>0.386</td></tr>
<tr><td>Qwen2.5-72b</td><td>0.800</td><td>0.031</td></tr>
<tr><td>- w/o Knowledge</td><td>0.800</td><td>0.033</td></tr>
<tr><td>- w/o Knowledge & Prior Distribution</td><td>0.600</td><td>0.370</td></tr>
<tr><td>GPT-4o-mini</td><td>0.800</td><td>0.039</td></tr>
<tr><td>- w/o Knowledge</td><td>0.800</td><td>0.052</td></tr>
<tr><td>- w/o Knowledge & Prior Distribution</td><td>0.667</td><td>0.323</td></tr></table>
", + "image_path": "1c254baddeee82d257854c6de8cf0642cfab2a1bf3f197f0a8fb52bcd38fa176.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 458, + 504, + 492 + ], + "lines": [ + { + "bbox": [ + 104, + 458, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 504, + 492 + ], + "type": "text", + "content": "Table 4: Ablation experiment results on the presidential election prediction simulation, where -w/o Knowledge denotes without real-world user knowledge and -w/o Piror Distribution denotes using random demographics distribution." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 501, + 506, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 578 + ], + "type": "text", + "content": "We conduct an ablation study on the presidential election prediction simulation to assess the impact of prior demographics distribution and real-world user knowledge. As shown in Table 4, prior demographics distribution significantly improves the accuracy of the simulation in both Acc and RMSE compared to random demographics distribution. Additionally, past posts from users on social media platforms improve the fine-grained performance, especially for Llama3-70b in Acc and all the models in RMSE. We can tell from the ablation study that both prior distribution and real-world knowledge in the SocioVerse pipeline are significant during the simulation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 592, + 500, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 592, + 500, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 592, + 500, + 605 + ], + "type": "text", + "content": "4.3 Group Preference and Perspectives Can Be Well Reflected in Breaking News Feedback" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "During the Breaking News Feedback simulation, the core concern is whether the preferences and perspectives of the target group are well captured and reflected in the results. We reformulate the original questionnaire into the Likert 6-dimension scale ranging from 1 to 5 points, representing from totally disagree to totally agree. As the ground truth of the simulation is calculated by prompting LLM agents from the ground truth set, the simulated and real results are paired for each model, as shown in Figure 4. All the models powered by the potential audience set during the simulation tend to behave consistently with the ground truth users. However, Llama3-70b perform poorly with a larger gap between the simulated and real results than other models. GPT-4o-mini shows different attitudes in the fairness (FA) and public acceptance (PA) dimensions, which may be because the news is related to OpenAI. 
Another trend indicates that, generally, all the models perform more" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 70, + 504, + 150 + ], + "blocks": [ + { + "bbox": [ + 107, + 70, + 504, + 150 + ], + "lines": [ + { + "bbox": [ + 107, + 70, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 107, + 70, + 504, + 150 + ], + "type": "image", + "image_path": "f30fe370c7d43ea99207a6f091595cf5559b34c28ba881f238d60524e7804d01.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 156, + 506, + 191 + ], + "lines": [ + { + "bbox": [ + 104, + 156, + 506, + 191 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 506, + 191 + ], + "type": "text", + "content": "Figure 4: An illustration of the performances of the breaking news feedback simulation, where PC, PR, PB, TR, FA, and PA denote six dimensions from the Likert scale (see §3.2 questionnaire design), with 1-point standing for totally disagree and 5-point for totally agree." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "type": "text", + "content": "disagreeably in the simulated results than the real results, which also underlines the potential risk of biases during the public opinion simulation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 250, + 483, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 250, + 483, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 483, + 262 + ], + "type": "text", + "content": "4.4 The Capabilities of LLMs Vary in Different Domains in National Economic Survey" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 146, + 277, + 463, + 376 + ], + "blocks": [ + { + "bbox": [ + 146, + 277, + 463, + 376 + ], + "lines": [ + { + "bbox": [ + 146, + 277, + 463, + 376 + ], + "spans": [ + { + "bbox": [ + 146, + 277, + 463, + 376 + ], + "type": "table", + "html": "
<table><tr><td>Item</td><td>Llama3-70b</td><td>Qwen2.5-72b</td><td>GPT-4o-mini</td><td>GPT-4o</td><td>DeepSeek-R1</td></tr>
<tr><td>Daily</td><td>0.007</td><td>0.009</td><td>0.006</td><td>0.010</td><td>0.009</td></tr>
<tr><td>Clothing</td><td>0.012</td><td>0.015</td><td>0.019</td><td>0.015</td><td>0.015</td></tr>
<tr><td>Transportation_Communication</td><td>0.016</td><td>0.020</td><td>0.027</td><td>0.023</td><td>0.017</td></tr>
<tr><td>Education_Entertainment</td><td>0.018</td><td>0.022</td><td>0.024</td><td>0.017</td><td>0.022</td></tr>
<tr><td>Medical</td><td>0.023</td><td>0.062</td><td>0.041</td><td>0.057</td><td>0.060</td></tr>
<tr><td>Food</td><td>0.037</td><td>0.031</td><td>0.031</td><td>0.040</td><td>0.032</td></tr>
<tr><td>Household</td><td>0.052</td><td>0.110</td><td>0.107</td><td>0.120</td><td>0.102</td></tr>
<tr><td>Others</td><td>0.008</td><td>0.008</td><td>0.010</td><td>0.005</td><td>0.009</td></tr></table>
", + "image_path": "9971298eafb0097555473200ebe95969169d99fbf1d407eea813867188612681.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 380, + 504, + 413 + ], + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 413 + ], + "type": "text", + "content": "Table 5: Detailed results on the national economic survey simulation reported in NRMSE, where the Item column indicates the components of spending. The best results are **bolded*; the second-best results are underlined." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 421, + 505, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 421, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 421, + 505, + 499 + ], + "type": "text", + "content": "The simulation of the national economic survey covers 8 spending dimensions. The overall results in Table 3 show the average performance of these dimensions, while model performances among these dimensions can also vary. We calculate the averaged NRMSE of 31 regions on each spending level, as shown in Table 5. It is worth mentioning that all the models show high consistency. Eliminating the others item, all the models perform best on daily necessities spending planning and worst on housing spending, which can reveal the LLM's preference on the economic decision-making and highlight the challenge in housing spending strategy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 515, + 180, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 180, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 180, + 529 + ], + "type": "text", + "content": "5 Discussion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 541, + 506, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 629 + ], + "type": "text", + "content": "In this study, we introduce a generalized social simulation framework SocioVerse and evaluated its performance across three distinct real-world scenarios. Our findings indicate that state-of-the-art LLMs demonstrate a notable ability to simulate human responses in complex social contexts, although some gaps still remain between the simulated response and observed real-world outcomes. Therefore, future research may need to incorporate a broader range of scenarios and develop more fine-grained evaluations built upon the current analytic engine, to further explore and expand the boundaries of LLMs' simulation capabilities. Such efforts could pave the way for establishing LLMs as comprehensive and reliable tools for large-scale social simulation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "We observed several key patterns across the simulations of the scenarios. First, incorporating demographic distributions and users' historical experiences significantly improved simulation accuracy. These findings highlight the importance of building a large, demographically rich user pool, complemented by a multi-dimensional user tagging system for more precise modeling of group-specific behaviors. 
Second, under consistent measurement protocols, LLMs produced broadly similar simulations of human attitudes and ideologies. However, certain models, such as GPT-4o-mini, showed notable inconsistencies, indicating that model-specific preferences or biases remain influential and warrant closer scrutiny in future work. Finally, we found that while LLMs perform well in simple daily" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 107, + 72, + 504, + 95 + ], + "type": "text", + "content": "scenarios, they underperform in complex situations requiring contextual knowledge, underscoring the need to align model behavior with real-world experiences and social contexts." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 99, + 506, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 99, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 107, + 99, + 506, + 209 + ], + "type": "text", + "content": "Notably, the current version has only implemented part of our framework, indicating significant potential for enhancing the accuracy and quality of social simulations. Future work can focus on refining each module for better collaboration, enabling the framework to achieve its full potential. For instance, the incorporation of the social environment can inject up-to-date knowledge into LLMs, enhancing the understanding of social dynamics. The scenario engine can not only provide survey-based simulation but also expand to diverse formats such as social interviews and free interactions. Additionally, further optimization of the general LLMs and expert LLMs adaptation in the behavior engine will enable better accommodation of complex target user groups, such as minority groups and individuals with special disabilities. The analysis engine can introduce an autonomous planning module to improve the overall credibility of simulation results." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 214, + 506, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 214, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 107, + 214, + 506, + 313 + ], + "type": "text", + "content": "Beyond the social simulation framework, our work underscores the potential to bridge the gap between autonomous AI systems and traditional social science, offering social scientists a seamless, cost-effective tool for conducting social experiments with minimal setup. Such tools not only help analyze and validate psychological and sociological theories or hypotheses, such as behavioral economics and social identity theory, but also assist in predicting large-scale social impacts like policy changes, social movements, or public health crises. By providing an efficient and scalable simulation environment, our framework is not just a research tool, but an experimental platform for exploring the dynamic changes and long-term trends of virtual societies, with the aim of becoming a realistic mapping for real-world societies." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 328, + 202, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 328, + 202, + 342 + ], + "spans": [ + { + "bbox": [ + 107, + 328, + 202, + 342 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 353, + 504, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 353, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 107, + 353, + 504, + 387 + ], + "type": "text", + "content": "We would like to express our sincere gratitude to Professor Rongwei Chu and his research team for their invaluable support in this work. The project's computational resources are supported by the CFFF platform of Fudan University." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 89, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 111, + 89, + 505, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 89, + 505, + 110 + ], + "spans": [ + { + "bbox": [ + 111, + 89, + 505, + 110 + ], + "type": "text", + "content": "[1] American National Election Studies. Anes 2020 time series study full release [dataset and documentation], 2021. February 10, 2022 version." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 118, + 505, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 118, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 111, + 118, + 505, + 147 + ], + "type": "text", + "content": "[2] J. R. Anthis, R. Liu, S. M. Richardson, A. C. Kozlowski, B. Koch, J. Evans, E. Brynjolfsson, and M. Bernstein. LIm social simulations are a promising research method. arXiv preprint arXiv:2504.02234, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 504, + 177 + ], + "type": "text", + "content": "[3] L. P. Argyle, E. C. Busby, N. Fulda, J. R. Gubler, C. Rytting, and D. Wingate. Out of one, many: Using language models to simulate human samples. Political Analysis, 31(3):337-351, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 184, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 504, + 215 + ], + "type": "text", + "content": "[4] Z. Bao, Q. Liu, Y. Guo, Z. Ye, J. Shen, S. Xie, J. Peng, X. Huang, and Z. Wei. Piers: Personalized intelligent outpatient reception based on large language model with multi-agents medical scenario simulation. arXiv preprint arXiv:2411.13902, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 223, + 504, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 504, + 243 + ], + "type": "text", + "content": "[5] A. Barnett and A. Sarfati. The polls and the us presidential election in 2020.... and 2024. Statistics and Public Policy, 10(1):2199809, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 251, + 505, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 251, + 505, + 273 + ], + "spans": [ + { + "bbox": [ + 111, + 251, + 505, + 273 + ], + "type": "text", + "content": "[6] L. M. Bartels. Uninformed votes: Information effects in presidential elections. American journal of political science, pages 194-230, 1996." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 281, + 504, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 281, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 111, + 281, + 504, + 300 + ], + "type": "text", + "content": "[7] I. Beltagy, M. E. Peters, and A. Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 308, + 504, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 308, + 504, + 330 + ], + "spans": [ + { + "bbox": [ + 111, + 308, + 504, + 330 + ], + "type": "text", + "content": "[8] A. K. Chandra, D. C. Kozen, and L. J. Stockmeyer. Alternation. Journal of the Association for Computing Machinery, 28(1):114-133, 1981." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 337, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 337, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 111, + 337, + 504, + 358 + ], + "type": "text", + "content": "[9] Y.-S. Chuang and T. T. Rogers. Computational agent-based models in opinion dynamics: A survey on social simulations and empirical studies. arXiv preprint arXiv:2306.03446, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 365, + 504, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 365, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 504, + 396 + ], + "type": "text", + "content": "[10] V. Cologna, N. G. Mede, S. Berger, J. Besley, C. Brick, M. Joubert, E. W. Maibach, S. Mihelj, N. Oreskes, M. S. Schäfer, et al. Trust in scientists and their role in society across 68 countries. Nature Human Behaviour, pages 1–18, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 403, + 323, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 403, + 323, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 403, + 323, + 415 + ], + "type": "text", + "content": "[11] T. Connolly. Micromotives and macrobehavior., 1979." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 423, + 504, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 423, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 106, + 423, + 504, + 444 + ], + "type": "text", + "content": "[12] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2019." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 451, + 504, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 451, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 106, + 451, + 504, + 482 + ], + "type": "text", + "content": "[13] F. Dignum, V. Dignum, P. Davidsson, A. Ghorbani, M. van der Hurk, M. Jensen, C. Kammler, F. Lorig, L. G. Ludescher, A. Melchior, et al. Analysing the combined health, social and economic impacts of the coronavirus pandemic using agent-based social simulation. *Minds and Machines*, 30:177–194, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 490, + 504, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 490, + 504, + 511 + ], + "spans": [ + { + "bbox": [ + 106, + 490, + 504, + 511 + ], + "type": "text", + "content": "[14] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 518, + 504, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 518, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 504, + 548 + ], + "type": "text", + "content": "[15] C. Gao, X. Lan, N. Li, Y. Yuan, J. Ding, Z. Zhou, F. Xu, and Y. Li. Large language models empowered agent-based modeling and simulation: A survey and perspectives. *Humanities and Social Sciences Communications*, 11(1):1-24, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 556, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 556, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 106, + 556, + 504, + 578 + ], + "type": "text", + "content": "[16] C. Gao, X. Lan, Z. Lu, J. Mao, J. Piao, H. Wang, D. Jin, and Y. Li. S3: Social-network simulation system with large language model-empowered agents. arXiv preprint arXiv:2307.14984, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 586, + 504, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 586, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 504, + 616 + ], + "type": "text", + "content": "[17] S. Giorgi, V. E. Lynn, K. Gupta, F. Ahmed, S. Matz, L. H. Ungar, and H. A. Schwartz. Correcting sociodemographic selection biases for population prediction from social media. In Proceedings of the International AAAI Conference on Web and Social Media, volume 16, pages 228-240, 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 624, + 504, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 624, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 624, + 504, + 654 + ], + "type": "text", + "content": "[18] B. E. GOLDSMITH, Y. HORIUCHI, and K. MATUSH. Does public diplomacy sway foreign public opinion? identifying the effect of high-level visits. American Political Science Review, 115(4):1342-1357, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 662, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 662, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 662, + 504, + 693 + ], + "type": "text", + "content": "[19] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 
arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 700, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 504, + 722 + ], + "type": "text", + "content": "[20] B. Gómez-Calderón and Y. Ceballos. Journalism and artificial intelligence: the treatment of the chatbots in the Spanish press. index.comunicación, 14(1):281–300, Jan. 2024." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[21] J. C. Jackson, D. Rand, K. Lewis, M. I. Norton, and K. Gray. Agent-based modeling: A guide for social psychologists. Social Psychological and Personality Science, 8(4):387-395, 2017." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 101, + 506, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 101, + 506, + 124 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 506, + 124 + ], + "type": "text", + "content": "[22] A. Joshi, S. Kale, S. Chandel, and D. K. Pal. Likert scale: Explored and explained. British journal of applied science & technology, 7(4):396-403, 2015." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 130, + 506, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 130, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 107, + 130, + 506, + 152 + ], + "type": "text", + "content": "[23] M. Jusup, P. Holme, K. Kanazawa, M. Takayasu, I. Romić, Z. Wang, S. Geček, T. Lipić, B. Podobnik, L. Wang, et al. Social physics. Physics Reports, 948:1-148, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 159, + 504, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 159, + 504, + 181 + ], + "spans": [ + { + "bbox": [ + 107, + 159, + 504, + 181 + ], + "type": "text", + "content": "[24] S. Keeter, N. Hatley, A. Lau, and C. Kennedy. What 2020's election poll errors tell us about the accuracy of issue polling. Pew Research Center Methods, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 188, + 504, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 188, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 107, + 188, + 504, + 220 + ], + "type": "text", + "content": "[25] W. Kwon, Z. Li, S. Zhuang, Y. Sheng, L. Zheng, C. H. Yu, J. E. Gonzalez, H. Zhang, and I. Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 227, + 506, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 227, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 107, + 227, + 506, + 258 + ], + "type": "text", + "content": "[26] S. Lee, T.-Q. Peng, M. H. Goldberg, S. A. Rosenthal, J. E. Kotcher, E. W. Maibach, and A. Leiserowitz. Can large language models capture public opinion about global warming? an empirical assessment of algorithmic fidelity and bias. arXiv preprint arXiv:2311.00217, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 266, + 504, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 266, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 107, + 266, + 504, + 288 + ], + "type": "text", + "content": "[27] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 294, + 504, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 294, + 504, + 317 + ], + "spans": [ + { + "bbox": [ + 107, + 294, + 504, + 317 + ], + "type": "text", + "content": "[28] B. Liu, Y. Xu, Y. Yang, and S. Lu. How public cognition influences public acceptance of ccus in china: Based on the abc (affect, behavior, and cognition) model of attitudes. Energy Policy, 156:112390, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 323, + 504, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 323, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 107, + 323, + 504, + 355 + ], + "type": "text", + "content": "[29] X. Liu, S. Yang, X. Zhang, H. Kuang, L. Sun, Y. Yang, S. Chen, X. Huang, and Z. Wei. Ai-press: A multi-agent news generating and feedback simulation system powered by large language models. arXiv preprint arXiv:2410.07561, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 363, + 504, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 363, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 107, + 363, + 504, + 384 + ], + "type": "text", + "content": "[30] Y. Liu, X. Chen, X. Zhang, X. Gao, J. Zhang, and R. Yan. From skepticism to acceptance: Simulating the attitude dynamics toward fake news. arXiv preprint arXiv:2403.09498, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 392, + 506, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 392, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 392, + 506, + 422 + ], + "type": "text", + "content": "[31] H. Lyu, S. Jiang, H. Zeng, Y. Xia, Q. Wang, S. Zhang, R. Chen, C. Leung, J. Tang, and J. Luo. Llm-rec: Personalized recommendation via prompting large language models. arXiv preprint arXiv:2307.15780, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 430, + 504, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 430, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 504, + 453 + ], + "type": "text", + "content": "[32] C. M. Macal and M. J. North. Agent-based modeling and simulation. In Proceedings of the 2009 winter simulation conference (WSC), pages 86-98. IEEE, 2009." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 459, + 506, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 459, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 459, + 506, + 491 + ], + "type": "text", + "content": "[33] B. Major, A. Blodorn, and G. Major Blascovich. The threat of increasing diversity: Why many white americans support trump in the 2016 presidential election. Group Processes & Intergroup Relations, 21(6):931-940, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 498, + 504, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 504, + 530 + ], + "type": "text", + "content": "[34] X. Mou, X. Ding, Q. He, L. Wang, J. Liang, X. Zhang, L. Sun, J. Lin, J. Zhou, X. Huang, et al. From individual to society: A survey on social simulation driven by large language model-based agents. arXiv preprint arXiv:2412.03563, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 536, + 504, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 536, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 107, + 536, + 504, + 568 + ], + "type": "text", + "content": "[35] X. Mou, Z. Li, H. Lyu, J. Luo, and Z. Wei. Unifying local and global knowledge: Empowering large language models as political experts with knowledge graphs. In Proceedings of the ACM Web Conference 2024, pages 2603–2614, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 575, + 506, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 575, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 107, + 575, + 506, + 597 + ], + "type": "text", + "content": "[36] X. Mou, J. Liang, J. Lin, X. Zhang, X. Liu, S. Yang, R. Ye, L. Chen, H. Kuang, X. Huang, and Z. Wei. Agentsense: Benchmarking social intelligence of language agents through interactive scenarios, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 605, + 504, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 504, + 627 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 504, + 627 + ], + "type": "text", + "content": "[37] X. Mou, Z. Wei, and X. Huang. Unveiling the truth and facilitating change: Towards agent-based large-scale social movement simulation. arXiv preprint arXiv:2402.16333, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 634, + 506, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 634, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 107, + 634, + 506, + 655 + ], + "type": "text", + "content": "[38] NBS China. Communiqué of the Seventh National Population Census of the People's Republic of China. Technical report, 2023. Accessed: 2025-02-14." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 662, + 504, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 662, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 107, + 662, + 504, + 684 + ], + "type": "text", + "content": "[39] NBS China. Explanatory Notes on Main Statistical Indicators – Population, Society, and Labor (China Statistical Yearbook 2023), 2023. Accessed: 2025-02-14." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 691, + 402, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 402, + 703 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 402, + 703 + ], + "type": "text", + "content": "[40] NBS China. China Statistical Yearbook 2024, 2024. Accessed: 2025-02-14." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 711, + 413, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 711, + 413, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 711, + 413, + 723 + ], + "type": "text", + "content": "[41] OpenAI. GPT-4o System Card. Technical report, 2024. Accessed: 2025-02-14." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "[42] J. S. Park, J. O'Brien, C. J. Cai, M. R. Morris, P. Liang, and M. S. Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 506, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 506, + 131 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 506, + 131 + ], + "type": "text", + "content": "[43] J. S. Park, C. Q. Zou, A. Shaw, B. M. Hill, C. Cai, M. R. Morris, R. Willer, P. Liang, and M. S. Bernstein. Generative agent simulations of 1,000 people. arXiv preprint arXiv:2411.10109, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 137, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 137, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 137, + 506, + 158 + ], + "type": "text", + "content": "[44] L. Peisakhin, N. Stoop, and P. Van der Windt. Who hosts? the correlates of hosting the internally displaced. American Political Science Review, pages 1-16, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 163, + 506, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 506, + 194 + ], + "type": "text", + "content": "[45] F. Ribeiro, L. Henrique, F. Benevenuto, A. Chakraborty, J. Kulshrestha, M. Babaei, and K. Gummadi. Media bias monitor: Quantifying biases of social media news outlets at large-scale. In Proceedings of the International AAAI Conference on Web and Social Media, volume 12, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 200, + 338, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 200, + 338, + 211 + ], + "spans": [ + { + "bbox": [ + 107, + 200, + 338, + 211 + ], + "type": "text", + "content": "[46] S. J. Rosenstone. 
Forecasting presidential elections. 1981." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 217, + 468, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 217, + 468, + 228 + ], + "spans": [ + { + "bbox": [ + 107, + 217, + 468, + 228 + ], + "type": "text", + "content": "[47] T. C. Schelling. Models of segregation. The American economic review, 59(2):488-493, 1969." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 233, + 505, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 233, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 107, + 233, + 505, + 245 + ], + "type": "text", + "content": "[48] T. C. Schelling. Dynamic models of segregation. Journal of mathematical sociology, 1(2):143-186, 1971." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 250, + 504, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 250, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 107, + 250, + 504, + 272 + ], + "type": "text", + "content": "[49] Y. Shao, L. Li, J. Dai, and X. Qiu. Character-llm: A trainable agent for role-playing. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 13153–13187, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 277, + 504, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 277, + 504, + 298 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 504, + 298 + ], + "type": "text", + "content": "[50] E. R. Smith and F. R. Conrey. Agent-based modeling: A new approach for theory building in social psychology. *Personality and social psychology review*, 11(1):87-104, 2007." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 303, + 504, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 303, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 107, + 303, + 504, + 324 + ], + "type": "text", + "content": "[51] L. Sun, S. Wang, X. Huang, and Z. Wei. Identity-driven hierarchical role-playing agents. arXiv preprint arXiv:2407.19412, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 330, + 455, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 330, + 455, + 342 + ], + "spans": [ + { + "bbox": [ + 107, + 330, + 455, + 342 + ], + "type": "text", + "content": "[52] S. Tang. Idea, action, and outcome. Innovation in the Social Sciences, 2(2):123-170, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 346, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 346, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 506, + 367 + ], + "type": "text", + "content": "[53] R. A. Teixeira. Red, blue, and purple America: the future of election demographics. Rowman & Littlefield, 2009." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 373, + 505, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 373, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 107, + 373, + 505, + 396 + ], + "type": "text", + "content": "[54] T. Trimborn, P. Otte, S. Cramer, M. Beikirch, E. Pabich, and M. Frank. Subcemm: A simulator for agent-based computational economic market models. Computational economics, 55(2):707-744, 2020." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 400, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 400, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 400, + 504, + 422 + ], + "type": "text", + "content": "[55] A. van Dalen. Revisiting the algorithms behind the headlines. how journalists respond to professional competition of generative ai. Journalism Practice, pages 1-18, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 427, + 504, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 427, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 107, + 427, + 504, + 449 + ], + "type": "text", + "content": "[56] L. Wang, J. Zhang, H. Yang, Z. Chen, J. Tang, Z. Zhang, X. Chen, Y. Lin, R. Song, W. X. Zhao, et al. User behavior simulation with large language model based agents. arXiv preprint arXiv:2306.02552, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 453, + 506, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 453, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 107, + 453, + 506, + 495 + ], + "type": "text", + "content": "[57] K. Wu, X. Mou, L. Xue, Z. Ying, W. Wang, Q. Zhang, X.-J. Huang, and Z. Wei. Pasum: A pre-training architecture for social media user modeling based on text graph. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 12644-12656, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 501, + 506, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 501, + 506, + 531 + ], + "spans": [ + { + "bbox": [ + 107, + 501, + 506, + 531 + ], + "type": "text", + "content": "[58] B. Xiao, Z. Yin, and Z. Shan. Simulating public administration crisis: A novel generative agent-based simulation system to lower technology barriers in social science research. arXiv preprint arXiv:2311.06957, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 537, + 504, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 537, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 107, + 537, + 504, + 559 + ], + "type": "text", + "content": "[59] A. Yang, B. Yang, B. Zhang, B. Hui, B. Zheng, B. Yu, C. Li, D. Liu, F. Huang, H. Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 564, + 506, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 564, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 107, + 564, + 506, + 586 + ], + "type": "text", + "content": "[60] Z. Yang, Z. Zhang, Z. Zheng, Y. Jiang, Z. Gan, Z. Wang, Z. Ling, J. Chen, M. Ma, B. Dong, et al. Oasis: Open agents social interaction simulations on one million agents. arXiv preprint arXiv:2411.11581, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 590, + 504, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 590, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 107, + 590, + 504, + 612 + ], + "type": "text", + "content": "[61] R. Ye, Y. Zhang, Y. Zhang, H. Kuang, Z. Wei, and P. Sun. Multi-agent kto: Reinforcing strategic interactions of large language model in language game. arXiv preprint arXiv:2501.14225, 2025." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 617, + 504, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 617, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 107, + 617, + 504, + 639 + ], + "type": "text", + "content": "[62] S. Yue, S. Wang, W. Chen, X. Huang, and Z. Wei. Synergistic multi-agent framework with trajectory learning for knowledge-intensive tasks. arXiv preprint arXiv:2407.09893, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 644, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 644, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 107, + 644, + 506, + 685 + ], + "type": "text", + "content": "[63] X. Zhang, H. Kuang, X. Mou, H. Lyu, K. Wu, S. Chen, J. Luo, X. Huang, and Z. Wei. SoMeLVLM: A large vision language model for social media processing. In L.-W. Ku, A. Martins, and V. Srikumar, editors, Findings of the Association for Computational Linguistics ACL 2024, pages 2366-2389, Bangkok, Thailand and virtual meeting, Aug. 2024. Association for Computational Linguistics." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "type": "text", + "content": "[64] X. Zhang, J. Lin, L. Sun, W. Qi, Y. Yang, Y. Chen, H. Lyu, X. Mou, S. Chen, J. Luo, et al. Electionsim: Massive population election simulation powered by large language model driven agents. arXiv preprint arXiv:2410.20746, 2024." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 241, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 241, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 241, + 85 + ], + "type": "text", + "content": "A Data Cleaning Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 238, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 238, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 238, + 106 + ], + "type": "text", + "content": "A.1 Content Data Extraction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 115, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 115, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 115, + 504, + 138 + ], + "type": "text", + "content": "We extract only post-related content on all the social media platforms to avoid violating privacy policies. Specifically, the data list on each platform is shown in Table 6." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 206, + 147, + 403, + 209 + ], + "blocks": [ + { + "bbox": [ + 206, + 147, + 403, + 209 + ], + "lines": [ + { + "bbox": [ + 206, + 147, + 403, + 209 + ], + "spans": [ + { + "bbox": [ + 206, + 147, + 403, + 209 + ], + "type": "table", + "html": "
PlatformData list
Xuser ID, tweet, #likes, #comments, #retweets
Rednoteuser ID, notes, #likes, #comments
", + "image_path": "9ab297241e7b5ad5640f2f7c89ea694c7a2b607437aa0355f16e8a8ee7812b35.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 153, + 212, + 455, + 224 + ], + "lines": [ + { + "bbox": [ + 153, + 212, + 455, + 224 + ], + "spans": [ + { + "bbox": [ + 153, + 212, + 455, + 224 + ], + "type": "text", + "content": "Table 6: Data list for each social media platform during the data collection." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 238, + 241, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 238, + 241, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 241, + 251 + ], + "type": "text", + "content": "A.2 Abnormal Data Filtering" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": "We filter the abnormal data to guarantee the quality through text similarity calculation. Typically, all the textual content from the same user is calculated by means of the word repetition ratio. The threshold is set to 0.3. If the ratio surpasses the threshold, the user is considered likely to be a robot or advertising and will be filtered." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 740, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 302, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 302, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 302, + 85 + ], + "type": "text", + "content": "B Demographics Annotation System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 206, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 206, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 206, + 106 + ], + "type": "text", + "content": "B.1 LLM Annotation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "spans": [ + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "text", + "content": "To save costs, we first sample a subset of the user pool and employ multiple power LLMs for annotation. Due to the long time span of this work, users from different data sources in the user pool have used the powerful LLMs available at the time. For users derived from the X, GPT-4o" + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "inline_equation", + "content": "^5" + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "text", + "content": ", Claude3.5-Sonnet" + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "inline_equation", + "content": "^6" + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "text", + "content": ", and Gemini-1.5" + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "inline_equation", + "content": "^7" + }, + { + "bbox": [ + 104, + 115, + 506, + 171 + ], + "type": "text", + "content": " are employed. 
For users derived from the Rednote, GPT-4o, Claude3.5-Sonnet, and Qwen2.5-72b are employed." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 183, + 214, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 183, + 214, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 214, + 194 + ], + "type": "text", + "content": "B.2 Human Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 203, + 504, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 504, + 247 + ], + "type": "text", + "content": "We employ 7 professional human annotators to verify the results annotated by LLMs. Typically, each annotator is required to re-associate the demographic factors without the LLM labels. All the data are verified by at least 2 human annotators. The overall consistency between humans and LLMs is shown in Table 7." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 187, + 255, + 424, + 347 + ], + "blocks": [ + { + "bbox": [ + 187, + 255, + 424, + 347 + ], + "lines": [ + { + "bbox": [ + 187, + 255, + 424, + 347 + ], + "spans": [ + { + "bbox": [ + 187, + 255, + 424, + 347 + ], + "type": "table", + "html": "
ModelsHuman (X)Human (Rednote)
GPT-4o0.9050.723
Claude3.50.9010.659
Gemini-1.50.713\\
Qwen2.5\\0.846
Majority votes0.9560.849
", + "image_path": "95378075e345f87be207b28094b42ea42f66abffcb8dd1545b7c107a69d3e97d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 389, + 212, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 212, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 212, + 402 + ], + "type": "text", + "content": "B.3 Classifier Training" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 409, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 504, + 453 + ], + "type": "text", + "content": "We take the majority-voted labels from different LLMs to construct the training dataset. Considering the difference in mainstream language used on different platforms, we employ LongFormer [7] for X data and employ Bert-base-chinese [12] for Rednote. The implementation details are shown in Table 8." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 187, + 461, + 424, + 592 + ], + "blocks": [ + { + "bbox": [ + 104, + 350, + 504, + 373 + ], + "lines": [ + { + "bbox": [ + 104, + 350, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 504, + 373 + ], + "type": "text", + "content": "Table 7: Human annotators' verification results. We report the consistency between humans and different LLMs." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 461, + 424, + 592 + ], + "lines": [ + { + "bbox": [ + 187, + 461, + 424, + 592 + ], + "spans": [ + { + "bbox": [ + 187, + 461, + 424, + 592 + ], + "type": "table", + "html": "
ParamsLongFormerBert-base-chinese
train_size10,00010,000
# classifiers54
max_tokens4096512
learning_rate5e-55e-5
batch_size1632
optimizerAdamWAdamW
epochs310
device8*40902*4090
", + "image_path": "2e6b8fa3fd5779995f19c798aae290400cdd80c75f2319f40980aedd30481058.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 183, + 595, + 425, + 607 + ], + "lines": [ + { + "bbox": [ + 183, + 595, + 425, + 607 + ], + "spans": [ + { + "bbox": [ + 183, + 595, + 425, + 607 + ], + "type": "text", + "content": "Table 8: Implementation details for demographic classifiers." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 615, + 484, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 484, + 627 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 484, + 627 + ], + "type": "text", + "content": "We report the performances of demographic classifiers on each demographic factor in Table 9." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 639, + 289, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 289, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 289, + 651 + ], + "type": "text", + "content": "B.4 Overall Distribution of the User Pool" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 659, + 504, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 682 + ], + "type": "text", + "content": "We employ the demographic classifiers to annotate all of the users in the user pool, and the overall distributions are shown in Figure 5. For other demographics in specific simulations that are not" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 689, + 205, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 689, + 205, + 700 + ], + "spans": [ + { + "bbox": [ + 118, + 689, + 205, + 700 + ], + "type": "text", + "content": "5 gpt-4o-2024-08-06" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 700, + 246, + 710 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 700, + 246, + 710 + ], + "spans": [ + { + "bbox": [ + 118, + 700, + 246, + 710 + ], + "type": "text", + "content": "6claude-3-5-sonnet-20240620" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 118, + 710, + 178, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 178, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 178, + 722 + ], + "type": "text", + "content": "7 gemini-1.5-pro" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 187, + 70, + 422, + 192 + ], + "blocks": [ + { + "bbox": [ + 187, + 70, + 422, + 192 + ], + "lines": [ + { + "bbox": [ + 187, + 70, + 422, + 192 + ], + "spans": [ + { + "bbox": [ + 187, + 70, + 422, + 192 + ], + "type": "table", + "html": "
DemosLongFormerBert-base-chinese
AccF1AccF1
Gender0.8750.9040.9260.958
Age0.9020.8730.9250.920
Party0.8490.846\\\\
Ideology0.8100.807\\\\
Race0.7790.768\\\\
Consumption\\\\0.7490.748
Education\\\\0.9540.975
", + "image_path": "b3698fd8a7cc0afbf47a6e31691ed9fd5974990404ef71422f83c8297bb7cea7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 184, + 196, + 425, + 209 + ], + "lines": [ + { + "bbox": [ + 184, + 196, + 425, + 209 + ], + "spans": [ + { + "bbox": [ + 184, + 196, + 425, + 209 + ], + "type": "text", + "content": "Table 9: Performance of demographic classifiers on test set." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 223, + 504, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 504, + 246 + ], + "type": "text", + "content": "considered in prior distribution, only users from the sampled user pool are annotated by the majority votes of LLMs." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 256, + 299, + 411 + ], + "blocks": [ + { + "bbox": [ + 109, + 256, + 299, + 411 + ], + "lines": [ + { + "bbox": [ + 109, + 256, + 299, + 411 + ], + "spans": [ + { + "bbox": [ + 109, + 256, + 299, + 411 + ], + "type": "image", + "image_path": "0cb012c6c55c49fd65fa15745af750ea86382e3c5992b62bd56df63efb6706fc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 419, + 436, + 432 + ], + "lines": [ + { + "bbox": [ + 173, + 419, + 436, + 432 + ], + "spans": [ + { + "bbox": [ + 173, + 419, + 436, + 432 + ], + "type": "text", + "content": "Figure 5: Demographic distribution on X and Rednote user pool." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 301, + 257, + 499, + 411 + ], + "blocks": [ + { + "bbox": [ + 301, + 257, + 499, + 411 + ], + "lines": [ + { + "bbox": [ + 301, + 257, + 499, + 411 + ], + "spans": [ + { + "bbox": [ + 301, + 257, + 499, + 411 + ], + "type": "image", + "image_path": "cb26ecd99c58640d8eb4effc287a4e8bee0c4214555edb59f3d149882199fb82.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 354, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 354, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 354, + 85 + ], + "type": "text", + "content": "C Demographic Distribution Sampling Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 258, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 258, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 258, + 107 + ], + "type": "text", + "content": "C.1 Iterative Proportional Fitting" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": "In our study, we follow the classical IPF method to construct the joint distribution of all the attributes in our simulation. 
Specifically, we start with a two-way table with individual components denoted as " + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "inline_equation", + "content": "x_{ij}" + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": " and targeted estimation " + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\hat{x}_{ij}" + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": ". The targeted estimation " + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\hat{x}_{ij}" + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\Sigma_j\\hat{x}_{ij} = v_i" + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\Sigma_i\\hat{x}_{ij} = w_j" + }, + { + "bbox": [ + 104, + 115, + 506, + 160 + ], + "type": "text", + "content": ". The iterations are specified as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "type": "inline_equation", + "content": "\\hat{x}_{ij}^{(0)} = x_{ij}" + }, + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "type": "text", + "content": ". For " + }, + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "type": "inline_equation", + "content": "\\alpha > 1" + }, + { + "bbox": [ + 104, + 167, + 217, + 184 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 252, + 190, + 505, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 190, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 252, + 190, + 505, + 224 + ], + "type": "interline_equation", + "content": "\\hat {x} _ {i j} ^ {(2 \\alpha - 1)} = \\frac {\\hat {x} _ {i j} ^ {(2 \\alpha - 2)} v _ {i}}{\\sum_ {k = 1} ^ {J} \\hat {x} _ {i j} ^ {(2 \\alpha - 2)}} \\tag {1}", + "image_path": "ff781687759cc0943011e2da7ef1096cc2baed0a7ee1909c62cdf62fa3b74e60.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 258, + 227, + 505, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 227, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 258, + 227, + 505, + 262 + ], + "type": "interline_equation", + "content": "\\hat {x} _ {i j} ^ {(2 \\alpha)} = \\frac {\\hat {x} _ {i j} ^ {(2 \\alpha - 1)} w _ {j}}{\\Sigma_ {k = 1} ^ {I} \\hat {x} _ {i j} ^ {(2 \\alpha - 1)}} \\tag {2}", + "image_path": "c455658df89756889d84385989fcf4d04f7baa808ab9d0760fe1501340dd9f50.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 269, + 504, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 504, + 292 + ], + "type": "text", + "content": "The iterations end when the estimated marginals are sufficiently close to the real marginals or when they stabilize without further convergence." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 296, + 505, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 505, + 362 + ], + "type": "text", + "content": "For the presidential election simulation, we implement the IPF algorithm for each state using five attributes: gender, race, age group, ideology, and partisanship. In most cases, the algorithm does not converge, but the gaps between the estimated and actual marginals are less than " + }, + { + "bbox": [ + 104, + 296, + 505, + 362 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 296, + 505, + 362 + ], + "type": "text", + "content": ", with 888 out of 918 marginals falling within this range. For the outliers, since IPF adjusts proportionally to the marginals, the overall ratio of marginals remains consistent. We then use the estimated joint distribution and marginals for our massive simulation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 374, + 269, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 269, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 269, + 387 + ], + "type": "text", + "content": "C.2 Identical Distribution Sampling" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "text", + "content": "Identical distribution sampling, also known as direct sampling, is applied when the joint distribution of multiple demographics is available. Given feature " + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "text", + "content": ", the joint distribution can be formulated as " + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "inline_equation", + "content": "p(X,Y)" + }, + { + "bbox": [ + 104, + 394, + 504, + 429 + ], + "type": "text", + "content": ". Then, identical distribution sampling can be formulated as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 231, + 430, + 505, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 430, + 505, + 444 + ], + "spans": [ + { + "bbox": [ + 231, + 430, + 505, + 444 + ], + "type": "interline_equation", + "content": "\\left(X _ {i}, Y _ {i}\\right) \\sim p (X, Y) \\quad i = 1, 2, \\dots , n \\tag {3}", + "image_path": "36082be4a11f38a78da16a370c6d153ea25495bc0786e11c4d30ffd3e799783c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": "For breaking news feedback simulations, as the ground truth set is directly from the Rednote, we can obtain all the users' demographics and calculate the joint distribution. Simultaneously, the scale of the user pool satisfies the direct sampling requirements." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 498, + 337, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 337, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 337, + 510 + ], + "type": "text", + "content": "C.3 Prior Distribution of National Economic Survey" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "content": "For the national economic survey distribution, only average income is available from the official data. As a result, we generate the prior income distribution at the regional level. The income distribution across different regions exhibits significant heterogeneity, often characterized by a right-skewed pattern. To model this distribution, we adopt a mixture distribution approach, combining a lognormal distribution for the majority of the population with a Pareto distribution for the high-income segment. This hybrid model captures both the bulk of wage earners and the long-tail effect observed in high-income groups." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "type": "text", + "content": "Formally, let " + }, + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "type": "text", + "content": " denote an individual's wage. We assume that for the lower and middle-income groups " + }, + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "type": "inline_equation", + "content": "(X < x_{\\min})" + }, + { + "bbox": [ + 104, + 599, + 504, + 623 + ], + "type": "text", + "content": ", incomes follow a log-normal distribution:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 252, + 632, + 505, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 632, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 252, + 632, + 505, + 647 + ], + "type": "interline_equation", + "content": "X \\sim \\log \\text {N o r m a l} (\\mu , \\sigma^ {2}) \\tag {4}", + "image_path": "c89d917b41e40f4b606b5a27431a400adae43e74cb416cbd6208d68554aee7ab.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 649, + 133, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 649, + 133, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 133, + 658 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 196, + 658, + 505, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 658, + 505, + 687 + ], + "spans": [ + { + "bbox": [ + 196, + 658, + 505, + 687 + ], + "type": "interline_equation", + "content": "\\mu = \\ln \\left(\\frac {\\mu_ {\\text {a c t u a l}} ^ {2}}{\\sqrt {\\sigma_ {\\text {a c t u a l}} ^ {2} + \\mu_ {\\text {a c t u a l}} ^ {2}}}\\right), \\quad \\sigma = \\sqrt {\\ln \\left(1 + \\frac {\\sigma_ {\\text {a c t u a l}} ^ {2}}{\\mu_ {\\text {a c t u a l}} ^ {2}}\\right)} \\tag {5}", + "image_path": "7c9a05886551ed993c5c977d179fdfeb24745f20e17ac999b1da310b35dbc695.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 694, + 413, + 707 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 104, + 694, + 413, + 707 + ], + "spans": [ + { + "bbox": [ + 104, + 694, + 413, + 707 + ], + "type": "text", + "content": "For the high-income group " + }, + { + "bbox": [ + 104, + 694, + 413, + 707 + ], + "type": "inline_equation", + "content": "(X\\geq x_{min})" + }, + { + "bbox": [ + 104, + 694, + 413, + 707 + ], + "type": "text", + "content": " , wages follow a Pareto distribution:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 235, + 710, + 505, + 723 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 710, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 235, + 710, + 505, + 723 + ], + "type": "interline_equation", + "content": "P (X \\geq x) = C x ^ {- \\alpha}, \\quad x \\geq x _ {\\min } \\tag {6}", + "image_path": "7904c50696c6c4b3ef575c985f044bc7eab44f178d18b3c0d1f745b398df9e69.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "text", + "content": " is the Pareto shape parameter determining the income concentration at the top. The proportion of individuals assigned to each distribution is governed by an empirical threshold ratio, typically set such that " + }, + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "text", + "content": " of the population follows the log-normal distribution while " + }, + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 107, + 73, + 504, + 148 + ], + "type": "text", + "content": " follows the Pareto distribution. This mixture approach provides a flexible yet robust framework for simulating realistic income distributions across diverse economic conditions. We set all the parameters empirically according to previous research and generate the income distribution for 31 regions in China (Hong Kong, Macao, and Taiwan are excluded)." 
+ } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 279, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 279, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 279, + 86 + ], + "type": "text", + "content": "D Questionnaire Design Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 348, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 348, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 348, + 108 + ], + "type": "text", + "content": "We provide the questionnaires here for all three simulations." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 120, + 346, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 120, + 346, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 120, + 346, + 132 + ], + "type": "text", + "content": "D.1 Questionnaire for Presidential Election Prediction" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 145, + 525, + 713 + ], + "blocks": [ + { + "bbox": [ + 106, + 145, + 525, + 713 + ], + "lines": [ + { + "bbox": [ + 106, + 145, + 525, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 145, + 525, + 713 + ], + "type": "table", + "html": "
Q01Voting Behavior
QuestionORDER OF MAJOR PARTY CANDIDATE NAMES
Value Labels1. Democrat first / Republican second\n2. Republican first / Democrat second
Q02Social Security
QuestionNext I am going to read you a list of federal programs. For each one, I would like you to tell me whether you would like to see spending increased, decreased, or kept the same. What about Social Security? Should federal spending on Social Security be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q03Education
QuestionWhat about public schools? Should federal spending on public schools be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q04Immigration
QuestionWhat about tightening border security to prevent illegal immigration? Should federal spending on tightening border security to prevent illegal immigration be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q05Criminal Justice
QuestionWhat about dealing with crime? Should federal spending on dealing with crime be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q06Social Welfare
QuestionWhat about welfare programs? Should federal spending on welfare programs be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q07Infrastructure
QuestionWhat about building and repairing highways? Should federal spending on building and repairing highways be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q08Aid to Poor
QuestionWhat about aid to the poor? Should federal spending on aid to the poor be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q09Environment
QuestionWhat about protecting the environment? Should federal spending on protecting the environment be increased, decreased, or kept the same?
Value Labels-2. DK/RF\n1. Increased\n2. Decreased\n3. Kept the same
Q10Government
QuestionHow much do you feel that having elections makes the government pay attention to what the people think?
Value Labels-2. DK/RF\n1. A good deal\n2. Some\n3. Not much
Q11Economy
QuestionWhich party do you think would do a better job of handling the nation's economy?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q12Health Care
QuestionWhich party do you think would do a better job of handling health care?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q13Immigration
QuestionWhich party do you think would do a better job of handling immigration?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q14Taxes
QuestionWhich party do you think would do a better job of handling taxes?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q15Environment
QuestionWhich party do you think would do a better job of handling the environment?
Value Labels-2. DK/RF\n1. Democrats would do a better job\n2. Not much difference between them\n3. Republicans would do a better job
Q16Education
QuestionSome people think the government should provide fewer services even in areas such as health and education in order to reduce spending.\nOther people feel it is important for the government to provide many more services even if it means an increase in spending.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should provide fewer services\n2. Neutral\n3. Government should provide more services
Q17Defense
QuestionSome people believe that we should spend less money for defense.\nOthers feel that defense spending should be increased.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Decrease defense spending\n2. Neutral\n3. Increase defense spending
Q18Health Care
QuestionThere is much concern about the rapid rise in medical and hospital costs.\nSome people feel there should be a government insurance plan which would cover all medical and hospital expenses for everyone.\nOthers feel that all medical expenses should be paid by individuals through private insurance plans like Blue Cross or other company paid plans.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government insurance plan\n2. Neutral\n3. Private insurance plan
Q19Social Welfare
QuestionSome people feel the government in Washington should see to it that every person has a job and a good standard of living.\nOthers think the government should just let each person get ahead on their own.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should see to jobs and standard of living\n2. Neutral\n3. Government should let each person get ahead on own
Q20Aid to Blacks
QuestionSome people feel that the government in Washington should make every effort to improve the social and economic position of blacks.\nOthers feel that the government should not make any special effort to help blacks because they should help themselves.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Government should help blacks\n2. Neutral\n3. Blacks should help themselves
Q21Environment
QuestionSome people think we need much tougher government regulations on business in order to protect the environment.\nOthers think that current regulations to protect the environment are already too much of a burden on business.\nAnd, of course, some people have a neutral position.\nWhich of the following best describes your view?
Value Labels-2. DK/RF\n1. Tougher regulations on business needed to protect environment\n2. Neutral\n3. Regulations to protect environment already too much a burden on business
Q22Abortion
QuestionWould you be pleased, upset, or neither pleased nor upset if the Supreme Court reduced abortion rights?
Value Labels-2. DK/RF\n1. Pleased\n2. Upset\n3. Neither pleased nor upset
Q23Criminal Justice
QuestionDo you favor or oppose the death penalty for persons convicted of murder?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose
Q24US Position in World
QuestionDo you agree or disagree with this statement: ‘This country would be better off if we just stayed home and did not concern ourselves with problems in other parts of the world.’
Value Labels-2. DK/RF\n1. Agree\n2. Disagree
Q25US Position in World
QuestionHow willing should the United States be to use military force to solve international problems?
Value Labels-2. DK/RF\n1. Willing\n2. Moderately willing\n3. Not willing
Q26Inequality
QuestionDo you think the difference in incomes between rich people and poor people in the United States today is larger, smaller, or about the same as it was 20 years ago?
Value Labels-2. DK/RF\n1. Larger\n2. Smaller\n3. About the same
Q27Environment
QuestionDo you think the federal government should be doing more about rising temperatures, should be doing less, or is it currently doing the right amount?
Value Labels-2. DK/RF\n1. Should be doing more\n2. Should be doing less\n3. Is currently doing the right amount
Q28Parental Leave
QuestionDo you favor, oppose, or neither favor nor oppose requiring employers to offer paid leave to parents of new children?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q29LGBTQ+ Rights
QuestionDo you think business owners who provide wedding-related services should be allowed to refuse services to same-sex couples if same-sex marriage violates their religious beliefs, or do you think business owners should be required to provide services regardless of a couple's sexual orientation?
Value Labels-2. DK/RF\n1. Should be allowed to refuse\n2. Should be required to provide services
Q30LGBTQ+ Rights
QuestionShould transgender people - that is, people who identify themselves as the sex or gender different from the one they were born as - have to use the bathrooms of the gender they were born as, or should they be allowed to use the bathrooms of their identified gender?
Value Labels-2. DK/RF\n1. Have to use the bathrooms of the gender they were born as\n2. Be allowed to use the bathrooms of their identified gender
Q31LGBTQ+ Rights
QuestionDo you favor or oppose laws to protect gays and lesbians against job discrimination?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose
Q32LGBTQ+ Rights
QuestionDo you think gay or lesbian couples should be legally permitted to adopt children?
Value Labels-2. DK/RF\n1. Yes\n2. No
Q33LGBTQ+ Rights
QuestionWhich comes closest to your view? You can just tell me the number of your choice.
Value Labels-2. DK/RF 1. Gay and lesbian couples should be allowed to legally marry\n2. Gay and lesbian couples should be allowed to form civil unions but not legally marry\n3. There should be no legal recognition of gay or lesbian couples' relationship
Q34Immigration
QuestionSome people have proposed that the U.S. Constitution should be changed so that the children of unauthorized immigrants do not automatically get citizenship if they are born in this country.\nDo you favor, oppose, or neither favor nor oppose this proposal?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q35Immigration
QuestionWhat should happen to immigrants who were brought to the U.S. illegally as children and have lived here for at least 10 years and graduated high school here? Should they be sent back where they came from, or should they be allowed to live and work in the United States?
Value Labels-2. DK/RF\n1. Should be sent back where they came from\n2. Should be allowed to live and work in the US
Q36Immigration
QuestionDo you favor, oppose, or neither favor nor oppose building a wall on the U.S. border with Mexico?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q37Unrest
QuestionDuring the past few months, would you say that most of the actions taken by protestors to get the things they want have been violent, or have most of these actions by protestors been peaceful, or have these actions been equally violent and peaceful?
Value Labels-2. DK/RF\n1. Mostly violent\n2. Mostly peaceful\n3. Equally violent and peaceful
Q38Government
QuestionDo you think it is better when one party controls both the presidency and Congress, better when control is split between the Democrats and Republicans, or doesn’t it matter?
Value Labels-2. DK/RF\n1. Better when one party controls both\n2. Better when control is split\n3. It doesn’t matter
Q39Government
QuestionWould you say the government is pretty much run by a few big interests looking out for themselves or that it is run for the benefit of all the people?
Value Labels-2. DK/RF\n1. Run by a few big interests\n2. For the benefit of all the people
Q40Government
QuestionDo you think that people in government waste a lot of the money we pay in taxes, waste some of it, or don’t waste very much of it?
Value Labels-2. DK/RF\n1. Waste a lot\n2. Waste some\n3. Don’t waste very much
Q41Election Integrity
QuestionDo you favor, oppose, or neither favor nor oppose allowing convicted felons to vote once they complete their sentence?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q42Democratic Norms
QuestionHow important is it that news organizations are free to criticize political leaders?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q43Democratic Norms
QuestionHow important is it that the executive, legislative, and judicial branches of government keep one another from having too much power?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q44Democratic Norms
QuestionHow important is it that elected officials face serious consequences if they engage in misconduct?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q45Democratic Norms
QuestionHow important is it that people agree on basic facts even if they disagree politically?
Value Labels-2. DK/RF\n1. Not important\n2. Moderately important\n3. Important
Q46Democratic Norms
QuestionWould it be helpful, harmful, or neither helpful nor harmful if U.S. presidents could work on the country’s problems without paying attention to what Congress and the courts say?
Value Labels-2. DK/RF\n1. Helpful\n2. Harmful\n3. Neither helpful nor harmful
Q47Democratic Norms
QuestionDo you favor, oppose, or neither favor nor oppose elected officials restricting journalists’ access to information about government decision-making?
Value Labels-2. DK/RF\n1. Favor\n2. Oppose\n3. Neither favor nor oppose
Q48Gender Resentment
Question‘Many women interpret innocent remarks or acts as being sexist.’\nDo you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error\n1. Agree\n2. Neither agree nor disagree\n3. Disagree
Q49Gender Resentment
Question‘Women seek to gain power by getting control over men.’\nDo you agree, neither agree nor disagree, or disagree with this statement?
Value Labels-2. DK/RF/technical error\n1. Agree\n2. Neither agree nor disagree\n3. Disagree
", + "image_path": "a66412febde24e12e177a5c22dc9307635f04919238d13f32d8145fd1596adf3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 69, + 524, + 681 + ], + "blocks": [ + { + "bbox": [ + 106, + 69, + 524, + 681 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 524, + 712 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 524, + 712 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 524, + 714 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 524, + 714 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 524, + 712 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 524, + 712 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 69, + 524, + 680 + ], + "blocks": [ + { + "bbox": [ + 106, + 69, + 524, + 680 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + 
"spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 69, + 525, + 691 + ], + "blocks": [ + { + "bbox": [ + 106, + 69, + 525, + 691 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 525, + 300 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 525, + 300 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 318, + 318, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 318, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 318, + 330 + ], + "type": "text", + "content": "D.2 Questionnaire for Breaking News Feedback" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 343, + 525, + 721 + ], + "blocks": [ + { + "bbox": [ + 106, + 343, + 525, + 721 + ], + "lines": [ + { + "bbox": [ + 106, + 343, + 525, + 721 + ], + "spans": [ + { + "bbox": [ + 106, + 343, + 525, + 721 + ], + "type": "table", + "html": "
Q01Public Cognition (PC)
QuestionI have heard of ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q02Public Cognition (PC)
QuestionMany people around me use ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q03Public Cognition (PC)
QuestionI have a deep understanding of ChatGPT's functions and applications.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q04Perceived Risks (PR)
QuestionChatGPT may lead to the widespread dissemination of false information.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q05Perceived Risks (PR)
QuestionChatGPT may reduce human thinking ability and creativity.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q06Perceived Risks (PR)
QuestionThe development of ChatGPT may replace certain jobs, and I am deeply concerned about this.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q07Perceived Benefits (PB)
QuestionChatGPT will definitely improve my work and study efficiency.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q08Perceived Benefits (PB)
QuestionChatGPT helps broaden my knowledge and provides me with new perspectives and ideas.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q09Perceived Benefits (PB)
QuestionChatGPT promotes technological innovation and development in related fields.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q10Trust (TR)
QuestionI fully trust the team developing ChatGPT to manage and guide its development responsibly.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q11Trust (TR)
QuestionI have strong confidence in the accuracy and reliability of the information generated by ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q12Trust (TR)
QuestionI believe that the future application of ChatGPT will be effectively regulated.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q13Fairness (FA)
QuestionThe opportunities to use ChatGPT are distributed fairly among different groups of people.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q14Fairness (FA)
QuestionI find the distribution of benefits brought by ChatGPT to be fair.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q15Fairness (FA)
QuestionI believe that the decision-making process for the development and promotion of ChatGPT is fully transparent and adequately reflects public interests.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q16Public Acceptance (PA)
QuestionOverall, I strongly welcome the emergence of ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q17Public Acceptance (PA)
QuestionI am definitely willing to use ChatGPT in my work or studies.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
Q18Public Acceptance (PA)
QuestionI strongly support increased investment in the research and development of AI technologies like ChatGPT.
Value Labels1. Disagree\n2. Partially disagree\n3. Neutral\n4. Partially agree\n5. Agree
", + "image_path": "7ce59a581d9686e1bdc3056be8b7786b1ee7c8b56671e62f94b9629946ea1c5e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 69, + 524, + 668 + ], + "blocks": [ + { + "bbox": [ + 105, + 69, + 524, + 668 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 69, + 525, + 708 + ], + "blocks": [ + { + "bbox": [ + 105, + 69, + 525, + 708 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 70, + 523, + 158 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 523, + 158 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 173, + 324, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 173, + 324, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 324, + 186 + ], + "type": "text", + "content": "D.3 Questionnaire for National Economic Survey" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 198, + 525, + 703 + ], + "blocks": [ + { + "bbox": [ + 106, + 198, + 525, + 703 + ], + "lines": [ + { + "bbox": [ + 106, + 198, + 525, + 703 + ], + "spans": [ + { + "bbox": [ + 106, + 198, + 525, + 703 + ], + "type": "table", + "html": "
Q01Food
QuestionWhat is your average monthly expenditure on food (including dining out)? (Unit: CNY)
Value LabelsA. Below 500 CNY\nB. 501-650 CNY\nC. 651-800 CNY\nD. 801-1000 CNY\nE. Above 1000 CNY
Q02Food
QuestionDo you think your current spending on food, tobacco, and alcohol is too high relative to your income?
Value LabelsA. Yes\nB. No\nC. Acceptable
Q03Clothing
QuestionWhat is your average monthly expenditure on clothing (including apparel, shoes, and accessories)? (Unit: CNY)
Value LabelsA. Below 50 CNY\nB. 51-100 CNY\nC. 101-150 CNY\nD. 151-200 CNY\nE. Above 200 CNY
Q04Clothing
QuestionHow much economic pressure do you feel from clothing expenses?
Value LabelsA. Very low, almost no pressure\nB. Moderate, some pressure but manageable\nC. High, requires careful spending\nD. Very high, affects spending in other areas
Q05Household
QuestionWhat is your average monthly housing expenditure? (Including rent, mortgage, property fees, maintenance, etc.) (Unit: CNY)
Value LabelsA. Below 200 CNY\nB. 201-500 CNY\nC. 501-800 CNY\nD. 801-1200 CNY\nE. Above 1200 CNY
Q06Household
QuestionWhat percentage of your monthly income is spent on housing? (Including rent, mortgage, property fees, maintenance, etc.)
Value LabelsA. Below 10% \nB. 10%-20% \nC. 21%-30% \nD. 31%-40% \nE. Above 40%
Q07Daily Service
QuestionWhat is your average monthly expenditure on daily necessities (personal care, house- hold items, cleaning supplies, etc.) and services (housekeeping, repairs, beauty, pet services, etc.)? (Unit: CNY)
Value LabelsA. Below 80 CNY \nB. 81-120 CNY \nC. 121-160 CNY \nD. 161-200 CNY \nE. Above 200 CNY
Q08Transportation & Communication
QuestionWhat is your average monthly expenditure on transportation (public transport, taxis, fuel, parking, etc.) and communication (mobile and internet fees)? (Unit: CNY)
Value LabelsA. Below 200 CNY \nB. 201-300 CNY \nC. 301-400 CNY \nD. 401-500 CNY \nE. Above 500 CNY
Q09Education & Entertainment
QuestionWhat is your average monthly expenditure on education (tuition, training, books, etc.) and cultural entertainment (movies, performances, games, fitness, cultural activities, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY \nB. 101-200 CNY \nC. 201-300 CNY \nD. 301-400 CNY \nE. Above 400 CNY
Q10Education & Entertainment
QuestionCan you easily afford your current education, cultural, and entertainment expenses?
Value LabelsA. Yes, spending does not affect other areas \nB. Barely, needs some control \nC. Not really, affects other expenditures \nD. No, it creates significant financial pressure
Q11Medical
QuestionWhat is your average monthly expenditure on healthcare (medications, medical services, health management, etc.)? (Unit: CNY)
Value LabelsA. Below 100 CNY \nB. 101-200 CNY \nC. 201-300 CNY \nD. 301-400 CNY \nE. Above 400 CNY
Q12Medical
QuestionHave you purchased private medical or health insurance for yourself or your family?
Value LabelsA. Yes \nB. Not yet, but planning to \nC. No, and no plans to
Q13Others
QuestionBesides food, clothing, housing, daily necessities and services, transportation, education, culture, and healthcare, what is your average monthly expenditure on other areas (e.g., hobbies, charitable donations, investment, etc.)? (Unit: CNY)
Value LabelsA. Below 30 CNY\nB. 31-60 CNY\nC. 61-90 CNY\nD. 91-120 CNY\nE. Above 120 CNY
Q14Overall
QuestionHow would you evaluate the impact of your current consumption level on your household (or personal) financial situation?
Value LabelsA. Comfortable, can moderately increase spending\nB. Average, can maintain current spending\nC. Tight, need to control or reduce spending\nD. Very tight, affects quality of life
Q15Overall
QuestionDo you feel that your consumption pressure is too high relative to your income level?
Value LabelsA. Yes\nB. No\nC. Not sure
Q16Overall
QuestionIf your income increases, which consumption areas would you most like to expand or improve? (Multiple choices allowed)
Value LabelsA. Food and alcohol\nB. Clothing\nC. Housing\nD. Daily necessities and services\nE. Transportation and communication\nF. Education, culture, and entertainment\nG. Healthcare\nH. Other goods and services
Q17Overall
QuestionWhat is your consumption expectation for the next six months to a year?
Value LabelsA. Will continue to increase\nB. Will remain roughly the same\nC. Will moderately decrease\nD. Uncertain
", + "image_path": "18070cfafc0e2c3f8693e46f9ae0e339394f7c42e0f7d13109ea8126d6c4d3ff.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 524, + 717 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 524, + 717 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 69, + 524, + 537 + ], + "blocks": [ + { + "bbox": [ + 106, + 69, + 524, + 537 + ], + "lines": [], + "index": 0, + "angle": 0, + "type": "table_body", + "lines_deleted": true + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_content_list.json b/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a572f7f6ec6afe8d74e456ce333d6d73d165d2e3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_content_list.json @@ -0,0 +1,3556 @@ +[ + { + "type": "text", + "text": "MT-R1-Zero: Advancing LLM-based Machine Translation via R1-Zero-like Reinforcement Learning", + "text_level": 1, + "bbox": [ + 176, + 89, + 821, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhaopeng Feng $^{1}$ Shaosheng Cao $^{2\\dagger}$ Jiahan Ren $^{1}$ Jiayuan Su $^{1}$", + "bbox": [ + 213, + 146, + 781, + 164 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ruizhe Chen $^{1}$ Yan Zhang $^{1}$ Zhe Xu $^{2}$ Yao Hu $^{2}$ Jian Wu $^{1}$ Zuozhu Liu $^{1\\dagger}$", + "bbox": [ + 154, + 164, + 843, + 180 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Zhejiang University $^{2}$ Xiaohongshu Inc.", + "bbox": [ + 324, + 180, + 670, + 197 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhaopeng.23,zuozhuliu}@intl.zju.edu.cn", + "bbox": [ + 302, + 198, + 695, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{caoshaosheng,qiete,xiahou}@xiaohongshu.com", + "bbox": [ + 282, + 214, + 717, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large-scale 
reinforcement learning (RL) methods have proven highly effective in enhancing the reasoning abilities of large language models (LLMs), particularly for tasks with verifiable solutions such as mathematics and coding. However, applying this idea to machine translation (MT), where outputs are flexibly formatted and difficult to automatically evaluate with explicit rules, remains underexplored. In this work, we introduce MT-R1-Zero, the first open-source adaptation of the R1-Zero RL framework for MT without supervised fine-tuning or cold-start. We propose a rule-metric mixed reward mechanism to guide LLMs towards improved translation quality via emergent reasoning. On the WMT 24 English-Chinese benchmark, our MT-R1-Zero3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points. Meanwhile, our MT-R1-Zero7B-Mix attains a high average score of 62.25 across all metrics, placing it on par with advanced proprietary models such as GPT-4o and Claude-3.5-Sonnet, while the MT-R1-Zero7B-Sem variant achieves state-of-the-art scores on semantic metrics. Moreover, our work exhibits strong generalization capabilities on out-of-distribution MT tasks, robustly supporting multilingual and low-resource settings. Extensive analysis of model behavior across different initializations and reward metrics offers pioneering insight into the critical role of reward design, LLM adaptability, training dynamics, and emergent reasoning patterns within the R1-Zero paradigm for MT. Our code is available at https://github.com/fzp0424/MT-R1-Zero.", + "bbox": [ + 142, + 288, + 460, + 800 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 824, + 258, + 839 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large-scale Reinforcement Learning (RL) has empowered Large Language Models (LLMs) with strong reasoning capabilities (OpenAI, 2024; Team,", + "bbox": [ + 112, + 850, + 489, + 897 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6f616ff4dcdccce81b29cf948d93e5785e99354f0a92b7b75e4b01182a86d233.jpg", + "image_caption": [ + "Figure 1: Performance comparison of contemporary LLM-based translation systems on the WMT 24 EN-ZH test set, plotted by average score across BLEU, COMETKiwi, and XCOMET versus model size." + ], + "image_footnote": [], + "bbox": [ + 517, + 259, + 873, + 462 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2025a,b), demonstrating significant success in tasks such as mathematical reasoning or coding in which answers can be clearly verified. In particular, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) introduced a pure rule-based RL approach that directly fosters emergent reasoning ability without requirements on structured Chain-of-Thought (CoT) data (Wei et al., 2022; Cui et al., 2025) or sophisticated techniques such as Monte Carlo Tree Search (MCTS) (Silver et al., 2016; Luo et al., 2024; Qi et al., 2024; Guan et al., 2025). However, the applicability of these methods to machine translation (MT) remains challenging and underexplored, as MT outputs are flexibly generated and hard to evaluate automatically with explicit rules.", + "bbox": [ + 507, + 549, + 884, + 790 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent work has launched attempts to empower LLMs for MT with reasoning capabilities (Chen et al., 2025; Liu et al., 2025). 
Early studies investigate explicit reasoning methods for improved translation, such as finetuning with CoT (Wang et al., 2024a) or MCTS (Zhao et al., 2024), where advanced multi-step pipelines with self-correction or long-thought agentic mechanisms are further ex", + "bbox": [ + 507, + 793, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10160v1 [cs.CL] 14 Apr 2025", + "bbox": [ + 21, + 307, + 60, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author.", + "bbox": [ + 141, + 906, + 295, + 921 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 492, + 942, + 502, + 954 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "plored (Feng et al., 2024b; Wang et al., 2024b,a). Another line of work leverages RL to empower LLMs for MT through process reward models or supervised finetuning (SFT) with manually annotated CoT data (Feng et al., 2025; He et al., 2025). However, these methods often depend on manually designed or synthetically generated structured CoT data, rely on complex search algorithms, or require explicit multi-stage prompting, leaving the potential of pure RL-based approaches largely unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art (SoTA) open-source or proprietary models.", + "bbox": [ + 112, + 84, + 492, + 294 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Developing pure RL methods to directly enhance the reasoning ability of LLMs for better translation requires answering three key questions: 1) Feasibility: How to design R1-Zero-like RL pipelines with effective reward signals to directly solve MT tasks without binary rule-based rewards; 2) Reasoning capability: Could pure RL training cultivate emergent reasoning abilities and induce models to generate explicit thinking patterns for MT, such as multi-step CoT or verification/reflection; 3) Generalizability: Could the training paradigm generalize across different models (e.g., pre-trained base models, instruction-tuned models, or models pretrained on translation data) or diverse downstream settings (e.g., out-of-distribution, multilingual or low-resource scenarios).", + "bbox": [ + 115, + 294, + 490, + 549 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce MT-R1-Zero, the first open-source implementation that extends the RL-Zero-like RL training paradigm to MT. We propose a rule-metric mixed reward mechanism that adapts the original rule-based reward concept to effectively guide training in MT scenarios. We explore different rewards optimizing over lexical (Lex), semantic (Sem), and Lex-Sem mixed (Mix) objectives to guide LLMs towards improved translation quality via emergent reasoning. Our experiments demonstrate the efficacy of this approach: as RL training progresses, our MT-R1-Zero-3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points across all metrics (BLEU, COMETKiwi, XCOMET) on the WMT 24 English-Chinese (EN-ZH) benchmark. Meanwhile, our MT-R1-Zero-7B-Mix surpasses LLaMA-3.1-70B by an average of 1.24 points and Qwen2.5-72B by 0.48 points, even on par with top proprietary models such as GPT-4o and Claude-3.5-Sonnet. 
The MT-R1-Zero further demonstrate promising generalizability across multilingual and low-resource settings.", + "bbox": [ + 115, + 551, + 490, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Extensive experiments further provide key findings and insight into the adaptation of R1-Zero paradigm to MT. First, we empirically demonstrate that the choice of metric reward plays a pivotal role in steering RL optimization and translation style (semantic or lexical) (Finding 1). Further analysis reveals that MT-R1-Zero induces diverse emergent reasoning patterns, including dynamic language-of-thought transition during translation (Findings 2 and 3). We also identify distinct RL adaptability of different base LLMs (Finding 4). Ablation studies suggest that the pure RL process alone can lead to substantial translation improvements, independent of thinking morbidity (Section 6). Our core contributions are as follows:", + "bbox": [ + 507, + 84, + 885, + 325 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present the first open-source implementation of the DeepSeek-R1-Zero paradigm for MT, achieving superior performance across indomain, OOD and generalization MT tasks.", + "- Our analysis reveals key findings and recipes for effective R1-Zero adaptation to MT, including reward metric selection, emergent reasoning patterns, training dynamics and LLM adaptability.", + "- Extensive experiments and ablations show that pure RL serves as the primary driver of MT improvements, with minimal dependence on forced reasoning or output length, highlighting the significant potential of RL for diverse translation applications and broader language tasks." + ], + "bbox": [ + 509, + 336, + 885, + 582 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 509, + 590, + 665, + 606 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "LLM Reasoning with Post-training. Recent research indicates that scaling test-time computation can significantly enhance the ability of LLMs to tackle complex reasoning tasks (OpenAI, 2024; Zeng et al., 2024; Xiang et al., 2025). Many approaches rely on sophisticated techniques such as step-level process reward models (PRMs) that provide granular feedback (Lightman et al., 2024; Yuan et al., 2024; Snell et al., 2024) or MCTS to explore potential reasoning paths (Feng et al., 2023; Qi et al., 2024; Guan et al., 2025). A recent alternative, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025), demonstrated that large-scale pure RL, guided only by formatting rules and correctness of final predictions (rule-based reward), can motivate LLMs to develop self-emergent reasoning processes for complex reasoning tasks. Subsequent work (Hu et al., 2025; Face, 2025) successfully replicated this training paradigm in open-source", + "bbox": [ + 507, + 615, + 885, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 942, + 505, + 954 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "models, focusing on mathematical domains. Xie et al. (2025) further demonstrated the effectiveness and generalization capabilities of the R1-Zero paradigm using logic reasoning game problems, while Huang et al. (2025) explored its potential for vision reasoning. 
Despite its potential, the application of the R1-Zero RL paradigm to complex generation tasks like MT, in which the accuracy/quality of outputs is not rule-based and difficult to validate automatically, remains an open question.", + "bbox": [ + 112, + 84, + 489, + 244 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM Reasoning for MT. Leveraging reasoning to improve MT has garnered increasing attention, as systematically explored in Chen et al. (2025) and Liu et al. (2025). Previous work have designed multi-step processes for MT, e.g., Feng et al. (2024b) introduced an API-based self-correcting framework, and Wang et al. (2024b) employed multi-task training followed by a multistage inference phase. Wang et al. (2024a) integrated a similar procedure into inference-time CoT, using a multi-agent mechanism to synthesize long CoT prompts for English-Chinese literary translation. Efforts have also focused on reward modeling for MT reasoning. Feng et al. (2025) constructed implicit process reward models for translation and explored their effectiveness when combined with test-time search. Recent study further evaluated explicit reasoning for MT using CoT fine-tuning and MCTS to expand test-time computation (Zhao et al., 2024). He et al. (2025) demonstrated that models can acquire reasoning-based translation capabilities through multi-stage training with manually constructed CoT templates.", + "bbox": [ + 115, + 247, + 489, + 615 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, these existing methods often necessitate manually designed or synthetically generated structured CoT data, rely on complex search algorithms (MCTS), or require explicit multi-stage prompting (self-correction). The effectiveness of large-scale pure RL training paradigms such as R1-Zero remains unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art open-source or proprietary models.", + "bbox": [ + 112, + 618, + 489, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 112, + 778, + 218, + 794 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present our method that trains a translation model with pure RL using a hybrid reward model. Unlike tasks with fixed correct answers, translation allows for multiple valid outputs, making the evaluation more complicated. In this work, we introduce a rule-metric mixed reward that integrates reasoning format checking with multi-", + "bbox": [ + 112, + 808, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ple translation quality assessment metrics, which is used within the Group Relative Policy Optimization (GRPO) (Shao et al., 2024) algorithm to ensure stable and efficient RL training.", + "bbox": [ + 507, + 84, + 884, + 149 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Rule-Metric Mixed Reward", + "text_level": 1, + "bbox": [ + 507, + 160, + 773, + 174 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In RL, the reward is the main signal that drives model training. DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) employs simple rule-based rewards that check whether the final answer is correct and whether the response follows a specific format. This works well for tasks with fixed format correct answers such as math or coding. However, there is often no single \"correct\" output for MT, impeding the design of rule-based rewards. 
Fortunately, the MT community has developed many evaluation metrics to measure translation quality. Recent advancements in automated MT evaluation metrics have shown promise in aligning automated assessments with human translation quality judgments (Freitag et al., 2022, 2023). Thus, we design a rule-metric mixed reward, which consists of two parts: a Format Reward that checks output structure, and a Metric Reward that evaluates translation quality. We use a structured prompt template similar to that in DeepSeek-R1-Zero:", + "bbox": [ + 507, + 181, + 884, + 502 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Template for MT-R1-Zero", + "text_level": 1, + "bbox": [ + 536, + 514, + 744, + 530 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A conversation between User and Assistant. The User asks for a translation from {src_language} to {tgt_language}, and the Assistant solves it. The Assistant first thinks about the reasoning process in the mind and then provides the user with the final translation. The reasoning process and final translation are enclosed within and tags, respectively, i.e., reasoning process here final translation here . \nUser:{src_text} \nAssistant:", + "bbox": [ + 532, + 541, + 858, + 766 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, src_language and tgt_language indicate the source and target languages, and src_text denotes the source text requiring translation.", + "bbox": [ + 507, + 783, + 884, + 831 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Format Reward: We use regular expression extraction to enforce a structured response format. The model is required to place its reasoning process within tags and provide the final translation inside ", + "bbox": [ + 507, + 841, + 885, + 920 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 492, + 942, + 504, + 953 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tags. The format reward score $(S_{\\text{format}})$ is computed as:", + "bbox": [ + 112, + 84, + 489, + 116 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {f o r m a t} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f f o r m a t i s c o r r e c t} \\\\ - 1, & \\text {i f f o r m a t i s i n c o r r e c t} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 139, + 448, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Metric Reward: This reward evaluates the quality of model's translation, but only if the response format is correct. We use automatic evaluation metrics to calculate a translation quality score $S_{\\text{metric}}$ . We explore three approaches to compute $S_{\\text{metric}}$ :", + "bbox": [ + 112, + 190, + 489, + 272 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. N-gram Lexical Matching Reward (RewardLex): Metrics such as BLEU (Papineni et al., 2002) orchrF (Popovic, 2015) evaluate translation quality by measuring the difference (primarily lexical overlap) between the translation and the human-written reference. In our experiments, we employ the BLEU score calculated via the sacrebleu $^{1}$ .", + "2. Semantic and Contextual Reward (Reward-Sem): Learning-based metrics like COMET (Rei et al., 2020) and COMETKiwi (Rei et al., 2022) are trained on human judgments (e.g., MQM quality assessments (Freitag et al., 2021)). These metrics can recognize good translations even if the wording differs from the reference, as long as the meaning is preserved. 
We use the COMETKiwi- $23^{2}$ , which was used in the WMT 24 (Kocmi et al., 2024) and only needs the source sentence and the model's translation.", + "3. Lexical and Semantic Mixed Reward (Reward-Mix): To capture both lexical fidelity and semantic adequacy, we use a hybrid reward (Reward-Mix) that adds together Lexical Matching Reward (Reward-Lex) and Semantic and Contextual Reward (Reward-Sem)." + ], + "bbox": [ + 112, + 281, + 489, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Accordingly, the computation of $S_{\\text{metric}}$ depends on the selected reward configuration:", + "bbox": [ + 112, + 715, + 487, + 747 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {m e t r i c} = \\left\\{ \\begin{array}{l l} \\mathrm {B} (\\text {t r a n s}, \\text {r e f}), & \\text {i f R e w a r d - L e x} \\\\ \\mathrm {C K} (\\text {s r c}, \\text {t r a n s}) & \\text {i f R e w a r d - S e m} \\\\ \\mathrm {B} (\\text {t r a n s}, \\text {r e f}) + \\mathrm {C K} (\\text {s r c}, \\text {t r a n s}), & \\text {i f R e w a r d - M i x} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 114, + 758, + 502, + 802 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where B denotes normalized BLEU score, CK denotes the COMETKiwi score, trans is the generated translation, ref is the reference translation, and src is the source text.", + "bbox": [ + 112, + 812, + 489, + 875 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Rule-Metric Mixed Reward: The final reward $r$ combines both the format reward ( $S_{\\text{format}}$ ) and the metric reward ( $S_{\\text{metric}}$ ). Formally, it is calculated using the following rule:", + "bbox": [ + 507, + 84, + 882, + 149 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nr = \\left\\{ \\begin{array}{l l} S _ {f o r m a t} - 2, & \\text {i f} S _ {f o r m a t} = - 1 \\\\ S _ {f o r m a t} + S _ {m e t r i c}, & \\text {i f} S _ {f o r m a t} = 1 \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 170, + 862, + 212 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $S_{\\text{metric}}$ is calculated only if the response format is correct. $S_{\\text{format}} = 1$ . If the format is incorrect ( $S_{\\text{format}} = -1$ ), we skip the metric reward evaluation and assign a fixed penalty (e.g., 2) to discourage format violations. This setup encourages the model to first learn the correct output structure. When the format is correct, the final reward becomes $r = 1 + S_{\\text{metric}}$ . Unlike traditional rule-based rewards that give a fixed score for correct outputs, our approach uses a continuous metric score. This means the reward can vary within the [1, 2] or [1, 3] range, depending on translation quality. As a result, the model receives more detailed feedback and can learn to improve even small differences in translation quality across correctly formatted outputs.", + "bbox": [ + 507, + 219, + 884, + 478 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 RL Algorithm", + "text_level": 1, + "bbox": [ + 507, + 488, + 668, + 504 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use the Group Relative Policy Optimization (GRPO) algorithm (Shao et al., 2024) to train the translation model with our rule-metric mixed reward. In each training step, for a given translational question $q$ , we sample a group of candidate outputs $\\{o_1, o_2, \\dots, o_G\\}$ from the policy model $\\pi_{\\theta_{old}}$ . 
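As a concrete illustration of the rule-metric mixed reward defined in Section 3.1, the following is a minimal Python sketch; it is not the authors' released implementation. The <think>/<translate> tag names and the exact regex are assumptions made for illustration, as are the scorer callables and the assumption that both metric scores are normalized to [0, 1].

```python
import re
from typing import Callable

# Regex enforcing a "<think>...</think><translate>...</translate>" response layout.
# The tag names and the exact pattern are assumptions made for this illustration.
FORMAT_RE = re.compile(r"<think>.*?</think>\s*<translate>(.*?)</translate>", re.DOTALL)


def rule_metric_reward(
    response: str,
    source: str,
    reference: str,
    lex_scorer: Callable[[str, str], float],  # e.g. sentence BLEU / 100, called as (translation, reference)
    sem_scorer: Callable[[str, str], float],  # e.g. a COMETKiwi-style QE score, called as (source, translation)
    mode: str = "mix",  # "lex", "sem", or "mix"
) -> float:
    """Format check first; the metric reward is added only when the format is correct."""
    match = FORMAT_RE.search(response)
    if match is None:
        # S_format = -1 with a fixed penalty of 2, so r = -3 and no metric is evaluated.
        return -1 - 2
    translation = match.group(1).strip()
    if mode == "lex":
        s_metric = lex_scorer(translation, reference)
    elif mode == "sem":
        s_metric = sem_scorer(source, translation)
    else:  # "mix": lexical + semantic
        s_metric = lex_scorer(translation, reference) + sem_scorer(source, translation)
    # S_format = 1, so r lies in [1, 2] for Lex/Sem and [1, 3] for Mix when scores are in [0, 1].
    return 1 + s_metric
```

Under these assumptions, a correctly formatted output always outscores a mis-formatted one, which encourages the model to master the template before refining translation quality; during GRPO training, each sampled candidate would be scored in this way to produce the group rewards r_1, ..., r_G used next.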
$A_i = \\frac{r_i - \\mathrm{mean}(\\{r_1, r_2, \\dots, r_G\\})}{\\mathrm{std}(\\{r_1, r_2, \\dots, r_G\\})}$ is the computed advantage using the group rule-metric mixed rewards $\\{r_1, r_2, \\dots, r_G\\}$ . GRPO then maximizes the following objective function to optimize $\\pi_\\theta$ :", + "bbox": [ + 507, + 508, + 882, + 671 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} J _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (O | q)} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} \\mid q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} \\mid q)} A _ {i}, \\right. \\right. \\\\ \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (o _ {i} \\mid q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} \\mid q)}, 1 - \\varepsilon , 1 + \\varepsilon\\right) A _ {i}\\left. \\right) \\\\ \\left. - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right) \\right], \\tag {1} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 681, + 882, + 840 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\varepsilon$ and $\\beta$ are hyperparameters controlling the PPO clipping threshold and the weight of the Kullback-Leibler (KL) divergence penalty (Schulman et al., 2017; Shao et al., 2024), respectively. Specifically, $\\varepsilon$ determines the permissible range for policy", + "bbox": [ + 507, + 841, + 882, + 921 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://github.com/mjpost/sacrebleu \n $^{2}$ https://huggingface.co/Unbabel/wmt23-cometkiwi-da-xl", + "bbox": [ + 112, + 881, + 482, + 919 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "updates, while $\\beta$ regulates the magnitude of the KL penalty during training to prevent excessive policy shifts from the reference policy $\\pi_{ref}$ (typically the initialization of $\\pi_{\\theta}$ ). $D_{KL}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}}) = \\frac{\\pi_{\\mathrm{ref}}(o_i|q)}{\\pi_{\\theta}(o_i|q)} - \\log \\left(\\frac{\\pi_{\\mathrm{ref}}(o_i|q)}{\\pi_{\\theta}(o_i|q)}\\right) - 1$ is the KL divergence approximation term.", + "bbox": [ + 112, + 84, + 489, + 187 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 112, + 200, + 260, + 217 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 112, + 228, + 317, + 243 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset and Benchmarks. Our primary experimental focus is on English (EN) and Chinese (ZH). Following Xu et al. (2023) and Feng et al. (2024a), we collect parallel examples $(\\mathrm{EN} \\rightleftharpoons \\mathrm{ZH})$ sourced from WMT 2017 through WMT 2020. We apply a filter to exclude sentences containing fewer than 30 characters, leading to a final training set of 13,130 examples. For evaluation, we assess performance on two in-domain translation tasks using recent WMT benchmarks: EN-ZH (WMT $24^{3}$ ) and ZHEN (WMT $23^{4}$ ). 
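As an aside on the GRPO update just described, the following NumPy sketch shows how one rollout group's rule-metric rewards could be turned into the group-relative advantages A_i; the epsilon in the denominator and the example reward values are illustrative assumptions, not the authors' verl-based implementation.

```python
import numpy as np


def group_relative_advantages(rewards: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Normalize the G rewards of one prompt's rollout group to zero mean and unit variance."""
    return (rewards - rewards.mean()) / (rewards.std() + eps)


# Example: 8 rollouts per source sentence, matching the group size used in training.
# One rollout violated the format (reward -3); the others are correctly formatted Mix rewards.
rewards = np.array([1.82, 1.64, -3.0, 1.91, 1.77, 1.58, 1.88, 1.70])
advantages = group_relative_advantages(rewards)
# The mis-formatted rollout gets a strongly negative advantage, so the clipped policy-gradient
# term in the GRPO objective (Eq. 1) pushes probability mass away from it, while above-average
# translations within the group are reinforced.
```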
Additionally, we evaluate generalization capabilities on three out-of-distribution (OOD) translation directions: English-Japanese (EN-JA, WMT 2024), German-English (DE-EN, WMT 2023 Document-level), and German-Chinese (DE-ZH, Flores-200 (Costa-jussa et al., 2022)). Detailed statistics are presented in Table 8.", + "bbox": [ + 112, + 249, + 489, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines. Our primary baselines encompass leading proprietary models, namely Claude-3.5-Sonnet (Anthropic, 2024), GPT-4o (OpenAI, 2023), and Gemini-1.5-Pro (Team et al., 2024), alongside advanced open-source models such as the Qwen2.5 series (Yang et al., 2024), LLaMA-3.1 series (Grattafori et al., 2024), and the translation-specific Tower family (Alves et al., 2024). Proprietary models were accessed via their APIs5. More evaluation details can be found in Appendix A.", + "bbox": [ + 112, + 523, + 489, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Metrics. We assess translation quality using a suite of three complementary metrics: the lexical metric BLEU (Post, 2018), the reference-free learning-based metric COMETKiwi (Rei et al., 2022) (COMETKiwi-23-XL), and the reference-based learning-based metric XCOMET (Guerreiro et al., 2024) (XCOMET-XL). Together, these metrics provide a comprehensive view by evaluating both lexical fidelity and semantic adequacy.", + "bbox": [ + 112, + 684, + 489, + 829 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Details. Our implementation is based on", + "bbox": [ + 112, + 831, + 487, + 846 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the verl $^{6}$ framework. We selected the Qwen2.5-base series (3B and 7B parameter variants) as starting models for MT-R1-Zero training. During training, we configure a batch size of 8 and utilize 8 rollouts per prompt within the GRPO algorithm. We employ a constant learning rate of 5e-7 and set the sampling temperature to 1.0. The maximum generation length for responses is capped at 1024 tokens. We set the KL penalty coefficient $\\beta$ to 0, thereby removing the KL constraint against the reference policy. This decision stems from our empirical observation that the KL penalty tends to restrict the model's exploration of diverse response lengths, which we will discuss further in Section 6.1. The PPO clipping range $\\epsilon$ is set to 0.2. All models are trained for 1 epoch on 4 NVIDIA H800 80G GPUs for about 13 hours.", + "bbox": [ + 507, + 83, + 884, + 357 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Main Results", + "text_level": 1, + "bbox": [ + 507, + 372, + 660, + 386 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In-Domain Performance. Our models show substantial gains over their corresponding base versions, and exhibit competing performance compared to existing SoTA benchmarks (Table 1). On the EN-ZH direction, our MT-R1-Zero-7B-Mix on the average score (62.25) also surpasses GPT-4o (61.86) and Qwen2.5-72B (61.77). In addition, the MT-R1-Zero-7B-Sem achieves the best semantic-level performance on EN-ZH, scoring 72.07 on COMETKiwi and 79.37 on XCOMET. This surpasses the strongest proprietary model, Claude3.5-Sonnet, by 1.68 COMETKiwi points and exceeds the best listed open-source model, Qwen2.5-72B, by more than 3 points. On the ZH-EN direction, MT-R1-Zero-7B-Mix is also highly competitive. 
Our MT-R1-Zero-7B-Sem achieves a COMETKiwi score of 71.66, which is comparable to the top closed models (Claude-3.5-Sonnet 71.69, GPT-4o 71.63) and surpasses strong open-source models such as LLaMA-3.1-70B (70.43) and Qwen2.5-72B (70.95). Furthermore, the MT-R1-Zero-3B-Sem delivers impressive performance for its scale. It scores 69.75 COMETKiwi on EN-ZH, which is approximately 1.7 points higher than the much larger LLaMA-3.1-70B and over 0.7 points above Qwen2.5-72B.", + "bbox": [ + 507, + 394, + 884, + 813 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Out-of-Distribution Performance. Table 2 reports the XCOMET of our models on OOD language pairs with a zero-shot setting (models trained only on EN-ZH/ZH-EN). Despite this challenging setup, our models exhibit strong generaliza", + "bbox": [ + 507, + 814, + 884, + 894 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3https://www2.statmt.org/wmt24/translation-task.html", + "bbox": [ + 136, + 856, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "4https://www2.statmt.org/wmt23/translation-task.html", + "bbox": [ + 136, + 870, + 467, + 882 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "5The specific proprietary models accessed include Anthropic's claude-3-5-sonnet-20241022, OpenAI's gpt-4o-2024-08-06, and Google's gemini-1.5-pro.", + "bbox": [ + 115, + 883, + 485, + 920 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://github.com/volcengine/verl", + "bbox": [ + 529, + 906, + 749, + 920 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 942, + 504, + 953 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/aeb6fe232c20e93a9294e931828401ff289284f573bd6eaeb2a718bc2b332396.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MODEL | ZH-EN | EN-ZH
 | BLEU | COMETKiwi | XCOMET | Avg. | BLEU | COMETKiwi | XCOMET | Avg.
Closed
Claude-3.5-Sonnet (2024/10) | 22.55 | 71.69 | 87.32 | 60.52 | 38.63 | 70.39 | 78.24 | 62.42
GPT-4o (2024/08) | 22.57 | 71.63 | 87.22 | 60.47 | 41.13 | 69.01 | 75.43 | 61.86
Gemini-1.5-Pro (2025/03) | 18.34 | 69.23 | 85.55 | 57.71 | 39.82 | 67.47 | 76.26 | 61.18
Open
General Purpose LLMs
LLaMA-3.1-70B-Instruct | 25.19 | 70.43 | 86.21 | 60.61 | 39.82 | 68.05 | 75.17 | 61.01
Qwen2.5-72B-Instruct | 21.96 | 70.95 | 87.07 | 59.99 | 39.29 | 69.04 | 76.97 | 61.77
Qwen2.5-32B-Instruct | 20.54 | 69.35 | 85.47 | 58.45 | 36.36 | 68.43 | 74.90 | 59.90
Translation-Specific LLMs
TowerInstruct-13B-v0.1 | 24.72 | 70.17 | 85.69 | 60.19 | 37.06 | 66.22 | 73.13 | 58.80
TowerInstruct-7B-v0.2 | 23.32 | 69.99 | 84.93 | 59.41 | 34.93 | 64.04 | 70.67 | 56.55
Ours
Qwen2.5-3B-Base | 14.26 | 64.86 | 76.76 | 51.96 | 15.90 | 52.05 | 67.13 | 45.03
MT-R1-Zero-3B-Lex | 21.53 | 66.33 | 81.69 | 56.52 | 33.70 | 60.58 | 65.67 | 53.32
MT-R1-Zero-3B-Sem | 18.41 | 70.33 | 85.98 | 58.24 | 24.32 | 69.75 | 76.92 | 57.00
MT-R1-Zero-3B-Mix | 22.54 | 68.84 | 84.08 | 58.49 | 36.27 | 65.05 | 72.10 | 57.81
Qwen2.5-7B-Base | 18.23 | 68.27 | 84.99 | 57.16 | 31.14 | 63.38 | 69.83 | 54.78
MT-R1-Zero-7B-Lex | 23.56 | 65.35 | 82.12 | 57.01 | 40.11 | 64.57 | 70.21 | 58.30
MT-R1-Zero-7B-Sem | 16.62 | 71.66 | 86.07 | 58.12 | 23.07 | 72.07 | 79.37 | 58.17
MT-R1-Zero-7B-Mix | 23.98 | 70.81 | 86.17 | 60.32 | 40.97 | 69.43 | 76.36 | 62.25
", + "bbox": [ + 117, + 80, + 878, + 399 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c14738ed2a90fea60765a305cf9e025702d36c8cc35a56742176cf90905b8840.jpg", + "table_caption": [ + "Table 1: Performance comparison on in-domain translation directions (EN-ZH, ZH-EN) using BLEU, COMETKiwi, and XCOMET metrics, with average metric scores (Avg.). MT-R1-Zero variants (-Lex, -Sem, -Mix) are compared against closed and open baselines, which are further categorized by accessibility and specialization. The -Mix variant often achieves the best balance, while -Sem reaches peak semantic scores." + ], + "table_footnote": [], + "table_body": "
MODEL | OUT-OF-DISTRIBUTION
 | EN-JA | DE-EN (DOC) | DE-ZH | Avg.
Strong Baseline
Qwen2.5-72B-Instruct | 76.86 | 89.51 | 88.42 | 84.93
LLaMA3.1-70B-Instruct | 75.67 | 88.72 | 87.42 | 83.94
Same-size Baseline
Qwen2.5-7B-Instruct | 63.74 | 87.45 | 84.43 | 78.54
LLaMA-3.1-8B-Instruct | 64.50 | 86.84 | 82.23 | 77.86
TowerInstruct-7B-v0.2 | 56.73 | 89.47 | 84.28 | 76.83
MT-R1-Zero-7B-Lex | 60.65 | 85.25 | 83.86 | 76.59
MT-R1-Zero-7B-Sem | 71.95 | 87.68 | 87.66 | 82.43
MT-R1-Zero-7B-Mix | 68.49 | 88.69 | 88.69 | 81.96
", + "bbox": [ + 117, + 487, + 482, + 627 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Out-of-distribution performance comparison using the XCOMET metric on EN-JA, DE-EN (Document-level), and DE-ZH.", + "bbox": [ + 112, + 638, + 487, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tion. The MT-R1-Zero-7B-Sem achieves the highest average XCOMET score (82.43) across the OOD tasks, reaching top scores on EN-JA (71.95) and DE-EN (87.68). The MT-R1-Zero-7B-Mix also demonstrates highly competitive generalization with an average score of 81.96, and secures the highest score on DE-ZH (88.69). While these variants do not consistently surpass the much larger strong baselines (Qwen2.5-72B Avg. 84.93, LLaMA3.1-70B Avg. 83.94), they are still highly competitive. Crucially, MT-R1-Zero-7B-Sem and -Mix significantly outperform all same-size baselines (Qwen2.5-7B-Instruct Avg. 78.54, LLaMA", + "bbox": [ + 110, + 711, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1-8B-Instruct Avg. 77.86, TowerInstruct-7B-v0.2 Avg. 76.83) by a considerable margin (at least 3.4 points). These OOD results suggest that the quality improvements in MT-R1-Zero can effectively transfer to unseen language pairs. Results using COMETKiwi and BLEU are also provided in Appendix Tables 6 and 7, respectively.", + "bbox": [ + 507, + 489, + 884, + 602 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Key Findings and Insight", + "text_level": 1, + "bbox": [ + 507, + 615, + 764, + 633 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Based on our extensive experiments adapting the R1-Zero paradigm to MT, we identify several key findings regarding the underlying mechanisms, design ideas, and emergent behaviors of our MT-R1-Zero framework.", + "bbox": [ + 507, + 644, + 882, + 722 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Impact of Reward Metric Selection", + "text_level": 1, + "bbox": [ + 507, + 737, + 831, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As detailed in Section 3.1, we explore three metric rewards: Reward-Lex, Reward-Sem, and Reward-Mix. Our results demonstrate that the choice among these significantly affects the learning target and final model outputs, as stated in Finding 1.", + "bbox": [ + 507, + 758, + 882, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Finding 1: Reward metric selection critically shapes optimization targets and translation style.", + "bbox": [ + 532, + 858, + 858, + 906 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 942, + 505, + 954 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/23089aaf58a392db50729b3ce583e9bf14970ec78ddace94058d3f25fd77b580.jpg", + "image_caption": [ + "Figure 2: Training dynamics using Reward-Lex, Reward-Sem, and Reward-Mix, evaluated with COMETKiwi, BLEU, and XCOMET." 
+ ], + "image_footnote": [], + "bbox": [ + 124, + 80, + 381, + 239 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6c56dc9a984522cc0e741f0de6a2bac59cf804d50995fb0411cd7ea83591766e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 80, + 625, + 239 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b0f8de023d87072cb76332d6598f440181ff0dd4eb685e2fd493c511dac131f7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 80, + 870, + 239 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ba5a6d095eeb40acfd1aa4e6f18c36e661f2b8ecf8e4bc67ac76bd24ea907ff4.jpg", + "image_caption": [ + "Figure 3: Qualitative examples illustrates the effect of different reward functions (Reward-Lex, Reward-Sem, Reward-Mix) on EN-ZH translation, where the stylistic differences are driven by reward optimization (Finding 1)." + ], + "image_footnote": [], + "bbox": [ + 124, + 282, + 875, + 601 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 2 presents the training dynamics with different rewards. Training with Reward-Lex maximizes BLEU scores, often at the expense of semantic scores, while Reward-Sem maximizes COMETKiwi, leading to a decline in BLEU. Training with Reward-Mix improves both metrics, with a trade-off of achieving sub-optimal COMETKiwi compared to Reward-Sem. Independent evaluation with XCOMET further supports this finding, showing consistent improvements for Sem and Mix variants while fluctuating for Lex. This finding aligns with the insight from Chen et al. (2025), suggesting that lexical and semantic assessments are complementary, particularly for reasoning-oriented LLMs, and combining them can offer a more comprehensive evaluation signal.", + "bbox": [ + 112, + 657, + 489, + 914 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitatively (Figure 3), this optimization alignment manifests as distinct translation styles. BLEU optimization encourages literal, n-gram focused translations, potentially sacrificing nuance. COMETKiwi optimization fosters translations that prioritize semantic faithfulness, even if lexically divergent from references. In contrast, the mixed reward yields balanced translations. This demonstrates that the metric reward fundamentally dictates the nature of the translation quality learned (e.g., semantic v.s. lexical). Therefore, careful metric selection and deliberate fusion are essential for tailoring RL-based MT refinement towards specific and desired translations.", + "bbox": [ + 507, + 657, + 884, + 879 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 942, + 502, + 952 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d091b63c34fab3acdd1a2db73581403ba0aaffa3a30b26bc7b4f08eb9981e3cc.jpg", + "image_caption": [ + "Figure 4: Training dynamics of MT-R1-Zero models (using Reward-Sem). Left: COMETKiwi score progression for 3B and 7B models on EN-ZH and ZH-EN test sets. Right: Average response length changes over training steps, exhibiting the classic decrease-then-increase pattern (Finding 2)." 
+ ], + "image_footnote": [], + "bbox": [ + 147, + 83, + 495, + 253 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1b66e6c2a22b2c8383e6ce327aa237f85185f220afb86525d8f7223f066d8e8c.jpg", + "image_caption": [ + "Figure 5: Evolution of an MT-R1-Zero model's reasoning process and translation output for the Chinese source text \"其影响可能类似于2008年的经济危机\" at different training steps (0, 400, 1600), showcasing the shift from decomposition to more semantic analysis (Finding 2)." + ], + "image_footnote": [], + "bbox": [ + 497, + 82, + 847, + 252 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Source (ZH):至于电量的详细说明,一般人看着心里会发虚。", + "bbox": [ + 156, + 321, + 428, + 332 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reference (EN):The majority of individuals will feel guilty after reading the in-depth description of the power.", + "bbox": [ + 458, + 318, + 798, + 337 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 0", + "text_level": 1, + "bbox": [ + 470, + 347, + 522, + 359 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "User:至于电量的详细说明,一般人看着心里会发虚。", + "bbox": [ + 142, + 357, + 373, + 367 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Assistant: To translate the given Chinese sentence into English, I will first break down the sentence into its components and then find the appropriate English equivalents for each component. The sentence is: \"至于电量的详细说明,一般人看着心里会发虚。\"", + "bbox": [ + 144, + 367, + 845, + 391 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "1. 至于 - This is a preposition that can be translated to \"as for\" or \"regarding.\"", + "bbox": [ + 144, + 391, + 608, + 401 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2. 电量 - This is a compound noun that means \"electricity\" or \"power.\"", + "bbox": [ + 144, + 401, + 537, + 410 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3. 的 - This is a possessive particle that indicates possession or relationship.", + "bbox": [ + 144, + 410, + 598, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4. 详细说明 - This is a compound noun that means \"detailed explanation\" or \"explanation in detail.\"", + "bbox": [ + 144, + 417, + 702, + 425 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 一般 - This is an adverb that means \"generally\" or \"usually.\"", + "bbox": [ + 144, + 425, + 505, + 432 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. 人 - This is a noun that means \"people\" or \"persons.\"", + "bbox": [ + 144, + 432, + 463, + 441 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. 看着 - This is a verb phrase that means \"looking at\" or \"seeing.\"", + "bbox": [ + 144, + 441, + 529, + 450 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8. 心里 - This is a compound noun that means \"in one's heart\" or \"in one's mind.\"", + "bbox": [ + 144, + 450, + 603, + 458 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "9. 会 - This is a modal verb that means \"will\" or \"would.\"", + "bbox": [ + 144, + 458, + 475, + 466 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "10. 发虚 - This is a verb phrase that means \"to feel faint\" or \"to feel dizzy.\"", + "bbox": [ + 144, + 466, + 591, + 475 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Now, I will combine these components to form the English translation. 
As for the detailed explanation", + "bbox": [ + 144, + 475, + 828, + 483 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "of electricity, generally speaking, people will feel faint when they see it. ", + "bbox": [ + 144, + 483, + 653, + 492 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "[COMETKiwI:60.11]", + "bbox": [ + 144, + 492, + 250, + 499 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 400", + "text_level": 1, + "bbox": [ + 263, + 508, + 331, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "User:至于电量的详细说明,一般人看着心里会发虚。", + "bbox": [ + 142, + 524, + 371, + 533 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Assistant: My task is to accurately translate the Chinese text provided by the user into natural and coherent English while preserving the original meaning, tone, and nuances of the Chinese sentence. As for the detailed explanation of the battery, most people would feel uneasy just by looking at it. [COMETKwi: 68.44]", + "bbox": [ + 144, + 533, + 448, + 601 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 1600", + "text_level": 1, + "bbox": [ + 618, + 508, + 692, + 521 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "User:至于电量的详细说明,一般人看着心里会发虚。", + "bbox": [ + 463, + 523, + 692, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Assistant: I translated the Chinese text \"User:至于电量的详细说明,一般人看着心里会发虚。\" to English as \"As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it.\" The original text quotes the user saying that when people see the detailed information about the battery level, they feel a bit uncomfortable or uncertain about it.translate> \"As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it.\" [COMETHiwi: 74.89]", + "bbox": [ + 463, + 532, + 850, + 607 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 Emergence and Evolution of Translation Thinking Patterns", + "text_level": 1, + "bbox": [ + 112, + 682, + 478, + 713 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "By observing the training process, we provide several insights into model adaptation and the emergence of reasoning.", + "bbox": [ + 112, + 721, + 489, + 769 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finding 2: Response length initially declines rapidly and then gradually increases as training progresses.", + "bbox": [ + 139, + 788, + 463, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 4 (Right) depicts the pattern in Finding 2 alongside consistent COMETKiwi improvements (Left). Qualitative analysis (Figure 5) reveals that this length trajectory reflects evolving reasoning", + "bbox": [ + 112, + 856, + 487, + 921 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "strategies. The initial decline corresponds to the model mastering the required format while transitioning from naive decomposition (Step 0) to more efficient, direct translations. 
The subsequent increase aligns with the development of richer semantic analysis and deeper contextual reasoning within the tags (Step 1600).", + "bbox": [ + 507, + 682, + 884, + 794 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finding 3: Diverse reasoning patterns emerge autonomously, varying in style and complexity, and moreover, the internal reasoning language could dynamically transit to target languages even for OOD settings.", + "bbox": [ + 534, + 825, + 858, + 906 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "User:A few more sensors in general would be nice I guess.", + "bbox": [ + 141, + 99, + 326, + 120 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Assistant: I will translate the given English sentence into Japanese. The sentence is \"A few more sensors in general would be nice I guess.\" I will break down the sentence into its main components and find the appropriate Japanese words and phrases for each part. \n 一般、少让更多的SENSAがた或良好を意思檔。", + "bbox": [ + 141, + 126, + 332, + 240 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "EN-ZH", + "text_level": 1, + "bbox": [ + 426, + 86, + 470, + 97 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool", + "bbox": [ + 339, + 101, + 505, + 128 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Assistant: I will translate the given text from English to Chinese. The text contains hashtags and mentions of a pilot training program, flying, a company called Teagan Air, a specific aircraft model (PA28), and a pilot. I will translate each part of the text accordingly.\n #飞行员培训 #飞行 #泰根航空 #PA28 #飞行员 #飞行学校 ", + "bbox": [ + 339, + 135, + 554, + 240 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "DE-ZH", + "text_level": 1, + "bbox": [ + 690, + 86, + 732, + 97 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Nähe eines Anspitzers kommt zu müssen.", + "bbox": [ + 559, + 101, + 865, + 128 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Assistant: To translate the given German sentence into Chinese, I will first break down the sentence into its main components and then find the appropriate Chinese equivalents for each component. The main components are: \"Heutzutage\" (today), \"schreiben\" (write), \"Menschen\" (people), \"Nachrichten\" (messages), \"auf Computerbildschirmen\" (on computer screens), \"ohne je\" (without ever), \"in die Näre\" (near), \"eines Anspitzers\" (a pencil), and \"kommen zu müssen\" (to have to come). 
\n 现在,人们可以在电脑屏幕上写信,而无需接近铅笔。", + "bbox": [ + 559, + 135, + 875, + 240 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User:A few more sensors in general would be nice I guess.", + "bbox": [ + 142, + 254, + 324, + 273 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Assistant: 我はルーダーから、 [User: あ、一般にルーダーが減少増えたARDS']」う英語の文を翻訹てほしぃと類まんた。その文は、ルーダーがルーダーの数を増やてほしぃと顧ったとを表語言。私はこの文を本日語に翻訹てまん。「あ、一般にルーダーをむう減少増やてむいかな」」う意味です。 「一般に、ルーダーをむう減少増やてむいかな。」", + "bbox": [ + 142, + 279, + 326, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool", + "bbox": [ + 339, + 253, + 571, + 271 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Assistant: 我在思考如何将这段英文文本“#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool\"翻译成中文。文本中包含多个标签,这些标签可能来自社交媒体帖子或文章,用于描述与飞行员培训、飞行、TeaganAir航空公司、PA28飞机型号、飞行员、飞行学校等相关的内容。翻译时,我会将每个标签逐一翻译成中文,保留其原始含义和用途。翻译后的文本内容如下:", + "bbox": [ + 339, + 278, + 571, + 356 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "#飞行员培训 #飞行 #泰根航空 #PA28型飞机 #飞行员 #飞行学校 \n#飞行员培训 #飞行 #泰根航空#PA28型飞机 #飞行员 #飞行学校", + "bbox": [ + 339, + 356, + 571, + 392 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen.", + "bbox": [ + 581, + 253, + 862, + 280 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Assistant: 我在思考如何将这段德文文本 \"Heutzzutege schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen.\"翻译成中文。文本中用户引用了一段话,指出如今人们可以在电脑屏幕上撰写消息,而无需接近任何针状物。原文指出,现在人们可以通过电脑屏幕发送消息,而不需要亲自接触到针状物(可能是指针式打字机或其他类似设备)。这段文本可能是在比较现代的数字通信方式与传统的打字或书写方式的差异。\n\"如今,人们可以在电脑屏幕上编写消息,而无需接近任何针状物。", + "bbox": [ + 581, + 287, + 872, + 385 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 6: Examples illustrating language-of-thought phenomenon, i.e., transition of the internal reasoning language in MT-R1-Zero models. The reasoning language transits from English at Step 0 to target language at Step 1600, indicated by bold text across various OOD test pairs (Finding 3).", + "bbox": [ + 112, + 414, + 882, + 456 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As R1-Zero-like training typically lacks a cold-start (DeepSeek-AI et al., 2025; Huang et al., 2025) phase with predefined reasoning examples, the observed thinking processes should be emergent and shaped by the RL objective. Our framework incentivizes a variety of reasoning styles within the tags (Figure 12). In particular, we observe patterns ranging from structured multi-step decomposition (Types I-III) to more colloquial processing (Types IV-V). While some instances include explicit \"review/refine\" steps, these generally appear as pre-planned components rather than the conversational, iterative self-correction characteristic of the \"Aha moment\" reported in mathematical reasoning tasks (DeepSeek-AI et al., 2025; Xie et al., 2025; Hu et al., 2025). This suggests that while MT-R1-Zero successfully encourages thinking, the complexity and specific nature of emergent reasoning are task-dependent.", + "bbox": [ + 112, + 472, + 489, + 778 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Furthermore, we observe a striking and interesting \"language-of-thought\" (transition in the language used for internal reasoning) phenomenon during OOD testing (Figure 6). 
While base models often use English as default thinking language based on template, MT-R1-Zero models progressively transit to utilize the target language of the translation task for their reasoning process within", + "bbox": [ + 112, + 791, + 489, + 921 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "the $<$ think $>$ $\\angle$ /think> block during training (see bold Japanese or Chinese text in step 1600). This dynamic adaptation of the internal \"language of thought\", conditioned on the task, emerges even without direct supervision on reasoning language.", + "bbox": [ + 507, + 472, + 882, + 552 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.3 Training Dynamics of Different LLMs", + "text_level": 1, + "bbox": [ + 507, + 609, + 853, + 626 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The effectiveness and training behavior of MT-R1-Zero are significantly influenced by the base LLM architecture and its initial state (pre-trained vs. instruction-tuned). We compare models from three distinct families: general-purpose (Qwen2.5 series $^{7}$ , LLaMA-3.1 series $^{8}$ ) and translation-specific (Tower family $^{9}$ ). For each model family, we include both the pre-trained base model and the corresponding instruction-finetuned variant, adapting their chat templates for the Instruct models.", + "bbox": [ + 507, + 657, + 884, + 818 + ], + "page_idx": 8 + }, + { + "type": "aside_text", + "text": "eep", + "bbox": [ + 117, + 148, + 134, + 189 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "7https://huggingface.co/Qwen", + "bbox": [ + 529, + 879, + 715, + 894 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "8https://huggingface.co/meta-llama", + "bbox": [ + 531, + 894, + 747, + 906 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "$^{9}$ https://huggingface.co/Unbabel/TowerBase-7B-v0.1", + "bbox": [ + 532, + 906, + 853, + 920 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 941, + 505, + 953 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/af5d7b3938351799e5ac948de228b3fbb521d21706b10eec2d5ab3231e32afd1.jpg", + "image_caption": [ + "Figure 7: Comparison of training dynamics for different model families (Qwen2.5, LLaMA-3.1, Tower) undergoing MT-R1-Zero RL training, highlighting differences in adaptability (Finding 4)." + ], + "image_footnote": [], + "bbox": [ + 127, + 82, + 369, + 225 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c68080def505d3b55351519d62c72fbac53f65b34aaeb4005232f5ce0d4e46b6.jpg", + "image_caption": [ + "Figure 8: Qualitative comparison of final outputs from different starting models trained with MT-R1-Zero for the Chinese input \"好看,但有点点小遗憾\", illustrating varying degrees of format adherence and reasoning generation, including format hacking by some models (Finding 4)." 
+ ], + "image_footnote": [], + "bbox": [ + 369, + 82, + 618, + 225 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c383c95349861f941ebf06b185adff0fab2153b9e73c7a737770d34165272b50.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 628, + 82, + 870, + 225 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Reference (EN): It's encouraging to see, although there is some disappointment", + "text_level": 1, + "bbox": [ + 127, + 277, + 458, + 298 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Qwen2.5-7B-Base", + "text_level": 1, + "bbox": [ + 233, + 303, + 357, + 313 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "User:好看,但有点点小遗憾", + "bbox": [ + 129, + 318, + 280, + 329 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Assistant: I translated the Chinese text \"User:好看,但有点点小遗憾\" to English as \"It looks good, but there is a tiny bit of disappointment.\" The user is saying that the object or situation being discussed appears nice, but there is a small aspect that they are not completely satisfied with.", + "bbox": [ + 129, + 329, + 457, + 395 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " \"It looks good, but there is a tiny bit of disappointment.", + "bbox": [ + 129, + 395, + 445, + 416 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "LLaMA-3.1-8B-Base", + "text_level": 1, + "bbox": [ + 223, + 430, + 366, + 441 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "User:好看,但有点点小遗憾", + "bbox": [ + 129, + 447, + 280, + 457 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Assistant: reasoning to generate translation \"It's beautiful, but there are a few minor drawbacks.\" ", + "bbox": [ + 129, + 458, + 465, + 486 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Qwen2.5-7B-Instruct", + "text_level": 1, + "bbox": [ + 588, + 281, + 747, + 292 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "user 好看,但有点点小遗憾 \nassistant The user is asking to translate the Chinese phrase \"好看,但有点点小遗憾\" to English. Let's break down the phrase:", + "bbox": [ + 475, + 299, + 818, + 338 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- \"好看\" means \"it looks good\" or \"it is attractive.\"\n- \"但\" is a conjunction that means \"but.\"", + "- \"有点点小遗憾\" means \"there is a slight bit of regret\" or \"there is a small sense of disappointment.\"" + ], + "bbox": [ + 475, + 347, + 858, + 386 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Putting it all together, the phrase suggests that something looks good, but there is a small element of disappointment or regret. 
A possible translation could be: \"It looks good, but there's a tiny bit of disappointment.\"", + "bbox": [ + 473, + 395, + 863, + 435 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To ensure the translation captures the nuance, we can refine it to: \"It looks good, but there's a slight sense of regret.\" \"It looks good, but there's a slight sense of regret.\" ", + "bbox": [ + 473, + 443, + 850, + 483 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "LLaMA-3.1-8B-Instruct", + "text_level": 1, + "bbox": [ + 206, + 497, + 383, + 508 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "user \n好看,但有点点小遗憾", + "bbox": [ + 134, + 514, + 401, + 533 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " assistant ", + "bbox": [ + 134, + 533, + 455, + 552 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " \"It's nice, but there's a little bit of a disappointment.\" \"It's nice, but there's a little bit of a disappointment.\" ", + "bbox": [ + 134, + 552, + 448, + 592 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "TowerBase-7B", + "text_level": 1, + "bbox": [ + 495, + 497, + 596, + 508 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "User:好看,但有点点小遗憾", + "bbox": [ + 478, + 516, + 606, + 536 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Assistant: Reasoning", + "bbox": [ + 478, + 536, + 605, + 555 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " \nTranslation", + "bbox": [ + 478, + 555, + 608, + 574 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "", + "bbox": [ + 478, + 574, + 561, + 583 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "TowerInstruct-7B", + "text_level": 1, + "bbox": [ + 678, + 497, + 811, + 508 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "user好看,但有点点小遗憾", + "bbox": [ + 631, + 516, + 860, + 536 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " assistant 好看,但有点点小遗憾", + "bbox": [ + 631, + 536, + 845, + 555 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " It was beautiful, but there was a small disappointment:", + "bbox": [ + 631, + 555, + 847, + 583 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Finding 4: LLM architectures exhibit distinct adaptability and effectiveness under MT-R1-Zero, with Qwen showing the highest compatibility in format learning and reasoning generation, while LLaMA and Tower face more challenges and tend towards \"format hacking\".", + "bbox": [ + 139, + 676, + 463, + 789 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As shown in Figure 7, both the translation-specific (Tower) and LLaMA-3.1 models exhibit significantly slower adaptation to the required / format compared to Qwen models, as evidenced by their delayed format error reduction. Furthermore, qualitative analysis (Figure 8) reveals that these models often", + "bbox": [ + 112, + 808, + 489, + 921 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "circumvent meaningful reasoning by generating minimal or templated placeholder content in the tags, potentially \"hacking\" the format reward. In contrast, Qwen2.5 models demonstrate stronger adaptability, consistently producing coherent reasoning text within the structured framework. This suggests that architectures like Qwen may possess inherent advantages for integrating structured reasoning via RL, a finding that aligns with prior work on cognitive behaviors in related domains (Gandhi et al., 2025). 
However, even Qwen2.5 models occasionally regress to simplistic one-sentence outputs during reasoning tasks, underscoring the instability of exploration in R1-Zero-like training paradigms.", + "bbox": [ + 507, + 669, + 884, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/22f396b2e96f47bf65e8777892aa22c60a338ae018f764bcd309f20b1afe93a1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | In-domain | Out-of-distribution
ZH-EN | EN-ZH | EN-JA | DE-ZH | DE-EN (Doc)
COMETKiwi | XCOMET | COMETKiwi | XCOMET | COMETKiwi | XCOMET | COMETKiwi | XCOMET | COMETKiwi | XCOMET
Qwen2.5-7B (SFT) | 69.29 | 84.80 | 67.25 | 74.29 | 67.77 | 65.39 | 67.01 | 86.17 | 67.44 | 86.74
Qwen2.5-7B (RL w/o thinking) | 70.78 | 86.26 | 69.62 | 76.03 | 68.68 | 68.77 | 67.84 | 86.67 | 68.31 | 88.30
Qwen2.5-7B (RL w/ thinking) | 70.81 | 86.17 | 69.43 | 76.36 | 69.27 | 68.49 | 68.74 | 88.69 | 68.74 | 88.69
", + "bbox": [ + 117, + 80, + 880, + 161 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/3fd6b1496e8692eacee3bd2a1d2b0277004e03a36cfe84b1be7952860d106bbd.jpg", + "table_caption": [ + "Table 3: Performance comparison of different training paradigms: Supervised Fine-Tuning (SFT) vs. RL with explicit thinking (RL w/ thinking) vs. RL without explicit thinking (RL w/o thinking). Results shown for in-domain and out-of-distribution tasks support the finding that the RL process itself is the primary driver of gains (Section 6)." + ], + "table_footnote": [], + "table_body": "
MODEL | DRT TEST SET
BLEU | COMETKIWI-22 | XCOMET | Avg.
Qwen2.5-7B-Instruct | 24.17 | 69.66 | 61.84 | 51.89
TowerInstruct-13B | 22.71 | 70.55 | 62.77 | 52.01
DRT-7B | 35.51 | 71.77 | 68.40 | 58.56
DRT-14B | 36.37 | 72.15 | 69.64 | 59.39
Qwen2.5-7B (SFT) | 21.61 | 69.91 | 63.20 | 51.57
Qwen2.5-7B (RL w/o thinking) | 28.44 | 72.92 | 66.17 | 55.84
Qwen2.5-7B (RL w/ thinking) | 28.42 | 73.20 | 66.64 | 56.09
", + "bbox": [ + 117, + 236, + 485, + 332 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 4: Performance comparison on the DRT literature translation dataset (Wang et al., 2024a) using BLEU, COMETKiwi-22, and XCOMET metrics.", + "bbox": [ + 112, + 342, + 489, + 387 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6 Analysis and Ablation", + "text_level": 1, + "bbox": [ + 112, + 410, + 339, + 426 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6.1 KL Penalty Constrains Response Length but Not Quality Gains", + "text_level": 1, + "bbox": [ + 112, + 435, + 480, + 467 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We investigate the effectiveness of the KL term in the GRPO objective (Equation 1) on response length and translation quality, as it would regularize the policy by discouraging large deviations from the initial reference model. We conducted experiments without the KL penalty (setting $\\beta = 0$ , Figure 9), and found that the average response length, after an initial drop, began to fluctuate and trend upward during training. This pattern is consistent with R1-Zero-like results in mathematical tasks (Yu et al., 2025; Yeo et al., 2025). Additional ablation of the KL penalty with COMETKiwi reveals that the improvement of translation quality appears to be largely independent of the thinking vocabulary. Significant quality gains were achieved in early-stage training (e.g., before Steps 400) before a substantial increase in response length, even in experiments conducted without the KL penalty. This suggests that performance improvements in the MT-R1-Zero setup could not be attributed solely or primarily to increasing reasoning vocabulary.", + "bbox": [ + 112, + 470, + 489, + 810 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6.2 Disentangling RL and Explicit Thinking", + "text_level": 1, + "bbox": [ + 112, + 820, + 478, + 837 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To determine whether performance gains stem primarily from the explicit step or the underlying RL optimization, we conducted an ablation study comparing three training paradigms using the similar setup from Section 4.1: 1) Supervised Fine", + "bbox": [ + 112, + 841, + 490, + 921 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/0a16468e5c467f1eb3ed748065d44da44f367778442a612e837a87eaa1935712.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 237, + 870, + 380 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e55e14eebe1ca2bdc590ae8036f91c559c49fa5d52dfe0db21c613c1ce9d1660.jpg", + "image_caption": [ + "Figure 9: Effect of the KL divergence penalty on EN-ZH COMETKiwi score and response length progression for models trained with (w/ KL, $\\beta = 0.01$ ) and without (w/o KL, $\\beta = 0$ ) the penalty. Experiments are conducted three times with MT-R1-Zero-7B-Sem." + ], + "image_footnote": [], + "bbox": [ + 519, + 391, + 865, + 542 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Tuning (SFT): The same base model is fine-tuned on the parallel data using LLaMA-Factory (Zheng et al., 2024), establishing a non-RL baseline. 2) RL w/ thinking (MT-R1-Zero-Sem): The model is trained with the rule-metric mixed reward (Format Reward and Reward-Sem) while enforcing explicit / structure generation. 3) RL w/o thinking: The model is trained with RL-zero optimization (Reward-Sem) solely to the final output, with no constraints on explicit step generation. 
See Appendix B for more details.", + "bbox": [ + 507, + 645, + 884, + 837 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The results are presented in Table 3. It reveals that the \"RL w/o thinking\" variant achieves performance comparable to MT-R1-Zero (\"RL w/ thinking\") across both in-domain and OOD tasks, while both RL configurations substantially outperform", + "bbox": [ + 507, + 841, + 885, + 921 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 489, + 942, + 507, + 954 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/02ab532ad654b385575826e4ea322358bc806133d469827fce79440c3868a45c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 82, + 310, + 212 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/cc80f22abd648b0263c9273ccf57a87fad8cc6b763268813c8dafc1c7cc14e83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 314, + 82, + 497, + 212 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/71511929065aefb7be9ca6b4f5a1077fa0d62bedbe3d5c23ded80cd558ab11c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 497, + 82, + 682, + 212 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/0757a8c88b15053894a803861ac938b15aece754b128ca6c399bcfc30873b808.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 82, + 870, + 212 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d80cadc9e8ea52aa5536f11d8ae0db66b4eea2ed0b6e2458c56434863a1b4d6f.jpg", + "image_caption": [ + "Figure 10: Training progression (COMET-22) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets, demonstrating applicability in multilingual settings (Section 6.3)." + ], + "image_footnote": [], + "bbox": [ + 122, + 214, + 310, + 344 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/7ac057682468991acec0a5725954084977bdbadd88258f7bb7ab1f266d8066bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 314, + 214, + 497, + 344 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/e93f56af0eca6a1c191d82bec561e582d8affcd116cfbe1b06aeac5792b12def.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 499, + 214, + 682, + 344 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/2b806a900a9478d666e9030e40c0b37feece8287689e4230effea1f0e3383eb4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 685, + 214, + 870, + 344 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "the SFT baseline – particularly in OOD settings. This pattern is further corroborated by evaluations on the DRT test set (Table 4), a literature translation benchmark (Wang et al., 2024a), where we again observe marginal differences between RL variants but significant gains over SFT. These findings demonstrate that while the tag could facilitate emergent reasoning patterns, the major performance improvements in MT-R1-Zero are primarily from the RL framework itself. 
This aligns with the intuition that online RL methods, iteratively sampling and evaluating self-generated outputs against quality metrics, principally learn \"how to translate\" that surpass SFT's behavior cloning limitations.", + "bbox": [ + 112, + 400, + 489, + 640 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6.3 Multilingual and Low-Resource Support", + "text_level": 1, + "bbox": [ + 112, + 652, + 478, + 668 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To evaluate the broader applicability of our framework, we examine its effectiveness in multilingual training scenarios and its potential benefits for low-resource languages. We train multilingual MT-R1-Zero models using the Germanic language data split in the X-ALMA (Xu et al., 2024), augmented with Chinese (see Table 9 for detailed data statistics). We set the batch size to 16 and used COMET $22^{10}$ as the metric reward (Reward-Sem), consistent with the evaluation protocols in X-ALMA. All models are trained for 1 epoch on 16 NVIDIA H800 80G GPUs for about 12 hours. All other hyperparameters follow the configuration described in Section 4.1. The training progress, measured by", + "bbox": [ + 112, + 673, + 489, + 898 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "COMET-22 for English-to-target directions, is depicted in Figure 10. We also report the XCOMET progression in Figure 11.", + "bbox": [ + 507, + 400, + 884, + 448 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The learning curves demonstrate consistent improvement in translation quality across languages spanning diverse resource levels, including those typically considered low-resource (e.g., Icelandic (IS) and Norwegian (NO)). The steady performance improvement observed throughout training confirms that the MT-R1-Zero framework remains effective when applied in multilingual settings.", + "bbox": [ + 507, + 449, + 884, + 577 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 589, + 640, + 605 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this work, we introduced MT-R1-Zero, the first successful adaptation of R1-Zero RL framework to MT using a novel rule-metric mixed reward mechanism that combines format enforcement with quality metrics. Our MT-R1-Zero significantly improves translation quality, achieving leading results on multiple benchmarks, i.e., our 3B models compete with much larger open-source models, while our 7B models are on par with advanced proprietary models. The MT-R1-Zero also demonstrates strong OOD generalization and multilingual applicability. Through extensive experiments and analysis, we highlight the significant impact of reward metric choice for optimization, showcase distinct adaptability across different LLMs, and reveal that performance gains are principally from the RL process itself rather than reasoning steps or morbidity, establishing R1-Zero as a viable and potent paradigm for advancing MT. 
More broadly, our work high-", + "bbox": [ + 505, + 615, + 884, + 921 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "10https://huggingface.co/Unbabel/wmt22-comet-da", + "bbox": [ + 131, + 904, + 443, + 920 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "lights the great potential of RL for diverse language processing tasks beyond translation.", + "bbox": [ + 112, + 84, + 487, + 116 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 114, + 128, + 220, + 142 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "While MT-R1-Zero represents a significant advance, certain limitations remain. The emergent reasoning observed, though diverse, did not achieve the sophisticated iterative self-correction capabilities demonstrated in mathematical reasoning tasks using similar RL or R1-like methods. This discrepancy may reflect fundamental differences in task structure or indicate the need for specialized design in translation tasks. One promising direction would be developing task-specific cold-start datasets for SFT before RL optimization, though this would deviate from the pure RL paradigm we investigated here. Future work could focus on inducing deeper reasoning structures specifically beneficial for the MT task, investigating architectural adaptability across a broader range of LLMs, and developing more appropriate reward mechanisms. Exploring applications to specialized domains (e.g., law and healthcare) and general language processing tasks presents promising opportunities to extend this work.", + "bbox": [ + 112, + 153, + 490, + 491 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 518, + 213, + 532 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. 2024. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733.", + "Anthropic. 2024. Claude 3.5 sonnet.", + "Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. 2025. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544.", + "Marta R Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, et al. 2022. No language left behind: Scaling human-centered machine translation. arXiv preprint arXiv:2207.04672.", + "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. 2025. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456.", + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu," + ], + "bbox": [ + 115, + 539, + 489, + 920 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. 
Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang W. L. Xiao Wei An Xiaodong Liu Xiaohan Wang Xiaokang Chen Xiaotao Nie, Xin Cheng Xien Liu Xie Xingchao Liu Xinyu Yang Xinyuan Li Xuecheng Su Xuheng Lin X.Q.Li Xiangyue Jin Xiaojin Shen Xiaosha Chen Xiaowen Sun Xiaoxiang Wang Xinnan Song Xinyi Zhou Xianzu Wang Xinxia Shan Y.K. Li Y.Q.WangY.X.Wei Yang Zhang Yanhong Xu Yao Li Yao Zhao Yaofeng Sun Yaohui Wang Yi Yu Yichao Zhang Yifan Shi Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunfan Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y.X.Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan Zhang Zhewen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint arXiv:2501.12948.", + "Hugging Face. 2025. Open r1: A fully open reproduction of deepseek-r1.", + "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179.", + "Zhaopeng Feng, Ruizhe Chen, Yan Zhang, Zijie Meng, and Zuozhu Liu. 2024a. Ladder: A model-agnostic framework boosting LLM-based machine translation" + ], + "bbox": [ + 509, + 85, + 885, + 920 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 489, + 941, + 510, + 954 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "to the next level. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15377-15393, Miami, Florida, USA. Association for Computational Linguistics.", + "Zhaopeng Feng, Jiahan Ren, Jiayuan Su, Jiamei Zheng, Zhihang Tang, Hongwei Wang, and Zuozhu Liu. 2025. Mt-rewardtree: A comprehensive framework for advancing llm-based machine translation via reward modeling. arXiv preprint arXiv:2503.12123.", + "Zhaopeng Feng, Yan Zhang, Hao Li, Wenqiang Liu, Jun Lang, Yang Feng, Jian Wu, and Zuozhu Liu. 2024b. 
Improving llm-based machine translation with systematic self-correction. arXiv preprint arXiv:2402.16379.", + "Markus Freitag, George Foster, David Grangier, Viresh Ratnakar, Qijun Tan, and Wolfgang Macherey. 2021. Experts, errors, and context: A large-scale study of human evaluation for machine translation. Transactions of the Association for Computational Linguistics, 9:1460-1474.", + "Markus Freitag, Nitika Mathur, Chi-kiu Lo, Eleftherios Avramidis, Ricardo Rei, Brian Thompson, Tom Kocmi, Frederic Blain, Daniel Deutsch, Craig Stewart, Chrysoula Zerva, Sheila Castilho, Alon Lavie, and George Foster. 2023. Results of WMT23 metrics shared task: Metrics might be guilty but references are not innocent. In Proceedings of the Eighth Conference on Machine Translation, pages 578-628, Singapore. Association for Computational Linguistics.", + "Markus Freitag, Ricardo Rei, Nitika Mathur, Chi-kiu Lo, Craig Stewart, Eleftherios Avramidis, Tom Kocmi, George Foster, Alon Lavie, and André F. T. Martins. 2022. Results of WMT22 metrics shared task: Stop using BLEU – neural metrics are better and more robust. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 46–68, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.", + "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. 2025. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307.", + "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.", + "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519.", + "Nuno M Guerreiro, Ricardo Rei, Daan van Stigt, Luisa Coheur, Pierre Colombo, and Andre FT Martins." + ], + "bbox": [ + 115, + 85, + 489, + 919 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2024. xcomet: Transparent machine translation evaluation through fine-grained error detection. Transactions of the Association for Computational Linguistics, 12:979-995.", + "Minggui He, Yilun Liu, Shimin Tao, Yuanchang Luo, Hongyong Zeng, Chang Su, Li Zhang, Hongxia Ma, Daimeng Wei, Weibin Meng, et al. 2025. R1-t1: Fully incentivizing translation capability in llms via reasoning learning. arXiv preprint arXiv:2502.19735.", + "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. 2025. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero.", + "Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. 2025. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749.", + "Tom Kocmi, Eleftherios Avramidis, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Markus Freitag, Thamme Gowda, Roman Grundkiewicz, et al. 2024. Preliminary wmt24 ranking of general mt systems and llms. arXiv preprint arXiv:2407.19884.", + "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2024. 
Let's verify step by step. In *The Twelfth International Conference on Learning Representations*.", + "Sinuo Liu, Chenyang Lyu, Minghao Wu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2025. New trends for modern machine translation with large reasoning models. arXiv preprint arXiv:2503.10351.", + "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. 2024. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592.", + "OpenAI. 2023. GPT-4: technical work.", + "OpenAI. 2024. Introducing openai o1. https://openai.com/o1/. Accessed: 2024-10-02.", + "Kishore Papineni, Salim Roukos, Todd Ward, and Wei Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", + "Maja Popovic. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392–395, Lisbon, Portugal. Association for Computational Linguistics." + ], + "bbox": [ + 510, + 85, + 882, + 919 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 489, + 942, + 509, + 953 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Brussels, Belgium. Association for Computational Linguistics.", + "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 2024. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195.", + "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. Comet: A neural framework for mt evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2685-2702.", + "Ricardo Rei, Marcos Treviso, Nuno M Guerreiro, Chrysoula Zerva, Ana C Farinha, Christine Maroti, José GC De Souza, Taisiya Glushkova, Duarte Alves, Luísca Coheur, et al. 2022. Cometkiwi: Ist-unbabel 2022 submission for the quality estimation shared task. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 634-645.", + "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.", + "David Silver, Aja Huang, Chris J. Maddison, Arthur Guez, L. Sifre, George van den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Vedavyas Panneershelvam, Marc Lanctot, Sander Dieleman, Dominik Grewe, John Nham, Nal Kalchbrenner, Ilya Sutskever, Timothy P. Lillicrap, Madeleine Leach, Koray Kavukcuoglu, Thore Graepel, and Demis Hassabis. 2016. Mastering the game of go with deep neural networks and tree search. Nature, 529:484-489.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. 
Scaling llm test-time compute optimally can be more effective than scaling model parameters arXiv preprint arXiv:2408.03314.", + "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530.", + "Kimi Team. 2025a. Kimi k1.5: Scaling reinforcement learning with llms.", + "Qwen Team. 2025b. Qwq-32b: Embracing the power of reinforcement learning." + ], + "bbox": [ + 115, + 85, + 489, + 920 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. 2024a. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498.", + "Yutong Wang, Jiali Zeng, Xuebo Liu, Fandong Meng, Jie Zhou, and Min Zhang. 2024b. Taste: Teaching large language models to translate through self-reflection. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6144-6158.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.", + "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Balak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. 2025. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682.", + "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768.", + "Haoran Xu, Young Jin Kim, Amr Sharaf, and Hany Hassan Awadalla. 2023. A paradigm shift in machine translation: Boosting translation performance of large language models. arXiv preprint arXiv:2309.11674.", + "Haoran Xu, Kenton Murray, Philipp Koehn, Hieu Hoang, Akiko Eriguchi, and Huda Khayrallah. 2024. X-alma: Plug & play modules and adaptive rejection for quality translation at scale. arXiv preprint arXiv:2410.03115.", + "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.", + "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373.", + "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An opensource llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476.", + "Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. 2024. Free process rewards without process labels. arXiv preprint arXiv:2412.01981." + ], + "bbox": [ + 510, + 85, + 882, + 920 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 489, + 942, + 507, + 954 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. 
Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135.", + "bbox": [ + 114, + 85, + 489, + 164 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2024. Marco-o1: Towards open reasoning models for open-ended solutions. Preprint, arXiv:2411.14405.", + "bbox": [ + 115, + 173, + 489, + 240 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. Llamafactory: Unified efficient finetuning of $100+$ language models. arXiv preprint arXiv:2403.13372.", + "bbox": [ + 115, + 249, + 489, + 315 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A Evaluation Details", + "text_level": 1, + "bbox": [ + 114, + 328, + 312, + 343 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "When evaluating model performance on the test set, we deployed open-source models locally using frameworks such as vLLM$^{11}$ or HuggingFace$^{12}$. We use a sampling decoding strategy with a temperature of 0.2 and top_p set to 0.95. The maximum generation length was capped at 1024 tokens. We adopt the prompt shown in Table 5 to sample the translation (applying the specific chat template when needed).", + "bbox": [ + 112, + 354, + 487, + 500 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B SFT Training Details", + "text_level": 1, + "bbox": [ + 114, + 510, + 332, + 527 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For the Supervised Fine-Tuning (SFT) baseline compared in the ablation study (Section 6.2), we utilized LLaMA-Factory (Zheng et al., 2024). The SFT process started from the same base model architecture as the corresponding RL experiments (e.g., Qwen2.5-7B) and was performed on the identical parallel translation dataset (13,130 examples from WMT 2017-2020 after filtering, detailed in Section 4.1). The model was fine-tuned on 8 NVIDIA H800 80G GPUs for 2 epochs using a learning rate of 5e-6 and a batch size of 64, totaling approximately 400 training steps.", + "bbox": [ + 112, + 536, + 489, + 730 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/034ce8e5accf4bb3654e49f621aeaacb12a6b6db4cfd17362d73b45299050156.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Inference Prompt
Translate the following text from {src_language} into {tgt_language}. {src_language}:{src_text} {tgt_language}:
", + "bbox": [ + 512, + 131, + 877, + 221 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/e51b8bbce116110722fb78a02080c7e65321d8200b84ae803a6baa624079eac9.jpg", + "table_caption": [ + "Table 5: Prompt used for translation generation. {tgt_language} : target language; {src_language}: source language; {src_text}: the source test sentence." + ], + "table_footnote": [], + "table_body": "
MODEL | OUT-OF-DISTRIBUTION
EN-JA | DE-EN (Doc) | DE-ZH | Avg.
Strong Baseline
Qwen2.5-72B-Instruct | 73.25 | 69.13 | 69.89 | 70.76
LLaMA3.1-70B-Instruct | 71.84 | 69.28 | 68.67 | 69.93
Same-size Baseline
Qwen2.5-7B-Instruct | 64.79 | 67.20 | 67.82 | 66.60
LLaMA-3.1-8B-Instruct | 62.42 | 66.77 | 64.28 | 64.49
TowerInstruct-7B-v0.2 | 58.33 | 69.03 | 65.45 | 64.27
MT-R1-Zero-7B-Lex | 63.33 | 66.17 | 64.32 | 64.61
MT-R1-Zero-7B-Sem | 72.00 | 68.41 | 71.51 | 70.64
MT-R1-Zero-7B-Mix | 69.27 | 68.74 | 68.74 | 68.92
", + "bbox": [ + 512, + 378, + 878, + 519 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/e06054696095bd5b5b33af33f93854ce771daa3bb99d030ff2d6e11f1ddb8da5.jpg", + "table_caption": [ + "Table 6: Out-of-distribution performance comparison using the COMETKiwi metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2)." + ], + "table_footnote": [], + "table_body": "
MODEL | OUT-OF-DISTRIBUTION
EN-JA | DE-EN (Doc) | DE-ZH | Avg.
Strong Baseline
Qwen2.5-72B-Instruct | 25.02 | 45.54 | 40.83 | 37.13
LLaMA3.1-70B-Instruct | 24.64 | 45.98 | 37.85 | 36.16
Same-size Baseline
Qwen2.5-7B-Instruct | 18.91 | 41.17 | 35.25 | 31.78
LLaMA-3.1-8B-Instruct | 16.22 | 40.28 | 31.08 | 29.19
TowerInstruct-7B-v0.2 | 10.52 | 43.40 | 34.74 | 29.55
MT-R1-Zero-7B-Lex | 14.94 | 40.01 | 37.00 | 30.65
MT-R1-Zero-7B-Sem | 14.12 | 33.19 | 22.83 | 23.38
MT-R1-Zero-7B-Mix | 20.27 | 43.17 | 21.41 | 28.28
", + "bbox": [ + 512, + 675, + 878, + 816 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 7: Out-of-distribution performance comparison using the BLEU metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2).", + "bbox": [ + 507, + 825, + 880, + 868 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "$^{11}$ https://github.com/vllm-project/vllm \n $^{12}$ https://huggingface.co/docs/transformers/main_classeses/text_generation", + "bbox": [ + 112, + 879, + 396, + 921 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/0f9aa36facffeb85c2cb9ac5dc34c7b7c25e508328d27a804f4f1b9adf6f3c9f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Train | Test
EN-ZH | ZH-EN | EN-ZH | ZH-EN | EN-JA | DE-EN | DE-ZH
# of cases | 6565 | 6565 | 997 | 1976 | 997 | 549 | 1012
Source | WMT 17-20 | WMT 24 | WMT 23 | WMT 24 | WMT 23 | Flores
", + "bbox": [ + 114, + 112, + 884, + 186 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/9feac2b9c959db1f4447139b0a49da8626efb382b3b1ce588da0a31cc3132242.jpg", + "table_caption": [ + "Table 8: Data statistics for the training and test sets used in the main experiments (EN $\\rightleftharpoons$ ZH)." + ], + "table_footnote": [], + "table_body": "
Parallel Data
Train (from EN)Train (to EN)Test (from EN)Test (to EN)Resource
Afrikaans (AF)299434110121012Mid
Danish (DA)299435510121012Mid
Dutch (NL)299440310121012High
German (DE)701588510121012High
Icelandic (IS)499467810121012Low
Norwegian (NO)299436010121012Low
Swedish (SV)299433910121012High
Chinese (ZH)690687410121012High
English (EN)-----
", + "bbox": [ + 127, + 279, + 870, + 481 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 9: Parallel data statistics for languages used in multilingual experiments (Section 6.3), detailing training/test pairs and resource level classification.", + "bbox": [ + 112, + 491, + 882, + 521 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/12359a347db12f6a28ac40ce29f250c9510c6ae5d91478e5f66234d84e04091a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 592, + 310, + 722 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/b02385982b62b62aeec9e46e8b146c1ef6e44c85a8a6b24822ba3480eea684cc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 317, + 592, + 495, + 722 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/f05ed6fe4584e687cbc0e0673a93c3498336b87980333857019826514498ecf5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 497, + 592, + 682, + 722 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/b5e0d24285ec67b56aa28877c3937735f3d15f64b819d2c5d0e8cf1401382559.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 592, + 870, + 722 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/efb78cebd39abe04cdbd765eca2155529d96f3ffc485a6d2f61c9aa3ba7dbc1c.jpg", + "image_caption": [ + "Figure 11: Training progression (reference-free XCOMET score) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets (Section 6.3)." + ], + "image_footnote": [], + "bbox": [ + 124, + 724, + 310, + 854 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/9c99c3b95f99c45ba1bf2edfb43f2ce1b3e1fd63b6bcccf788a94d00ce921146.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 317, + 724, + 495, + 854 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1b62dee80d10c919aac3871669ef0f76618e6c309f1c3e65bd72a56a1dc9961a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 502, + 724, + 682, + 854 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0083342aef6e593edf76e71c3ab654f7e9cf27116a34b0d643937e75add9d402.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 724, + 870, + 854 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 489, + 941, + 510, + 954 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "User:\"It's not that bad, right, Kayel?\"", + "bbox": [ + 132, + 115, + 344, + 133 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Assistant: ", + "text_level": 1, + "bbox": [ + 132, + 139, + 238, + 148 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "When approaching the task of translating the given English text into Japanese, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original while ensuring it reads smoothly and naturally in Japanese. I will pay particular attention to preserving the conversational nature of the exchange and the use of question tags (\"right, Kayel?\"), as well as the balance between the two characters' statements.", + "bbox": [ + 132, + 149, + 341, + 272 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this case, I will consider the following aspects:", + "bbox": [ + 132, + 279, + 324, + 294 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1. 
Maintaining the conversational tone: This involves using appropriate phrasing, word order, and intonation to convey the casual nature of the conversation.", + "bbox": [ + 132, + 303, + 334, + 344 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "2. Translating question tags: Japanese question tags are typically formed differently from English, so I will find an equivalent structure that maintains the flow of the conversation.", + "bbox": [ + 132, + 344, + 341, + 394 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3. Preserving the balance between the two characters' statements: I will ensure that the translation reflects the equal weight given to both Kayel's response and the speaker's initial statement in the original text.", + "bbox": [ + 132, + 394, + 329, + 451 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "4. Adapting to Japanese sentence structure: Japanese often places the verb at the end of a sentence, so I will adjust the word order accordingly while maintaining the overall meaning and rhythm of the conversation. ", + "bbox": [ + 132, + 451, + 341, + 518 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "", + "bbox": [ + 132, + 518, + 201, + 524 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "“大丈夫!也存在心。久己工儿?”", + "bbox": [ + 132, + 524, + 280, + 531 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "", + "bbox": [ + 132, + 531, + 203, + 539 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "User:好看,但有点点小遗憾", + "text_level": 1, + "bbox": [ + 132, + 545, + 260, + 554 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Assistant: ", + "text_level": 1, + "bbox": [ + 132, + 554, + 238, + 561 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The task at hand involves translating a sentence of Chinese into English while taking into consideration several factors to ensure an effective and appropriate translation. The source text is: “好看,但有点小遗憾。” Given the context in which this sentence is being used, it is clear that the speaker is expressing their positive opinion of something they have just seen or experienced, but also acknowledging a minor flaw or disappointment that slightly", + "bbox": [ + 132, + 562, + 349, + 668 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "diminishes their overall satisfaction. With this in mind, it is important to maintain the same level of nuance and complexity in the translation to effectively convey the same level of emotional and intellectual engagement expected in professional and polished English communication. 
After careful consideration of these factors and the specific requirements of the translation context, I have decided to render the source text as follows: \"It looks good, but there's a tiny bit of disappointment.\" This translation not only accurately conveys the same level of appreciation and critique as the source text but also does so in a professional and tactful manner that respects the sensitivity and complexity of the situation at hand.", + "bbox": [ + 132, + 669, + 351, + 839 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "", + "bbox": [ + 132, + 839, + 198, + 847 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "\"It looks good, but there's a tiny bit of disappointment.\" ", + "bbox": [ + 132, + 848, + 351, + 865 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "User:北交所融资融券交易的启动是持续推进北交所市场制度建设、完善交易机制、提升二级市场定价功能的重要举措。", + "bbox": [ + 359, + 115, + 610, + 140 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Assistant: ", + "text_level": 1, + "bbox": [ + 359, + 148, + 465, + 156 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese. I will focus on preserving the balance between providing all relevant information and maintaining readability, while also considering the natural flow of the English language. Additionally, I will pay attention to the following aspects:", + "bbox": [ + 359, + 156, + 606, + 255 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1. Structure: I will aim to maintain the overall structure of the Chinese text, including the use of paragraphs and sentence order, to ensure that the English translation remains coherent and easy to follow.", + "bbox": [ + 359, + 262, + 600, + 311 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "2. Tone and voice: I will strive to capture the tone and voice of the original text, which appears to be informative and objective, while also considering the appropriate level of formality for the target audience.", + "bbox": [ + 359, + 312, + 606, + 360 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3. Terminology: I will use accurate and up-to-date terminology related to the stock exchange, financing, and trading mechanisms, ensuring that all terms are clear and easily understood by the English-speaking audience.", + "bbox": [ + 359, + 361, + 606, + 410 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "4. Cultural and linguistic nuances: I will pay attention to any cultural or linguistic nuances present in the Chinese text and find suitable ways to convey them in English, where possible, without compromising the clarity or readability of the translation.", + "bbox": [ + 359, + 410, + 606, + 466 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5. Number and date formats: I will convert any numbers and date formats from the Chinese system to the corresponding English system, as necessary, to ensure that the translation is both accurate and locally appropriate.", + "bbox": [ + 359, + 467, + 606, + 517 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6. 
Punctuation and formatting: I will use standard English punctuation and formatting conventions, while taking into account any specific requirements or preferences for the target audience or publication platform.", + "bbox": [ + 359, + 517, + 606, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "By considering these aspects, I aim to produce a high-quality English translation that effectively communicates the content and intent of the original Chinese text. ", + "bbox": [ + 359, + 573, + 601, + 614 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "", + "bbox": [ + 359, + 615, + 425, + 621 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "\"The launch of financing and margin trading on the Beijing Stock Exchange is an important step in advancing the development of the exchange's market systems, improving trading mechanisms, and enhancing the secondary market's pricing capabilities.\" ", + "bbox": [ + 359, + 621, + 606, + 680 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "User:加拿大也报告了类似的结果,只是数字较低。", + "text_level": 1, + "bbox": [ + 359, + 686, + 581, + 695 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Assistant: ", + "text_level": 1, + "bbox": [ + 359, + 702, + 465, + 709 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese while ensuring it reads smoothly and naturally in English. I will focus on preserving the balance between the two sentences, maintaining the use of parallel structure where appropriate, and accurately conveying the nuances of the Chinese phrasing, including the use of numbers and the comparative nature of the information presented.", + "bbox": [ + 359, + 711, + 601, + 832 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "", + "bbox": [ + 359, + 832, + 426, + 840 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "\"Canada also reported similar results, although the numbers were lower.\" ", + "bbox": [ + 359, + 841, + 576, + 866 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "User: to divide the 412.5 meters long, 12.9 meters wide, with a total weight of 15,000 tons of continuous beams into 96 sections.", + "bbox": [ + 616, + 115, + 862, + 140 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Assistant: To translate the given English sentence into Chinese, I would follow these steps to ensure accuracy, fidelity to the original text, and naturalness in the target language (Chinese):", + "bbox": [ + 616, + 148, + 852, + 197 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1. Understand the context and meaning of the original text: I would first read and re-read the sentence to grasp its full meaning, paying close attention to technical terms, measurements, and the overall structure of the information being conveyed. This ensures a proper translation that maintains the original message.", + "bbox": [ + 616, + 205, + 862, + 271 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "2. 
Identify key elements and vocabulary: I would identify important numbers, measurements, and specific terms that need to be accurately translated, such as \"412.5 meters,\" \"12.9 meters,\" and \"15,000 tons.\" I would also note any technical terms related to engineering or construction, such as \"continuous beams\" and \"sections.\"", + "bbox": [ + 616, + 272, + 863, + 336 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3. Consider cultural and contextual nuances: While this particular sentence doesn't contain cultural references that would require adaptation, I would generally be aware of how to translate phrases or expressions that might have different connotations or usages in Chinese culture compared to English.", + "bbox": [ + 616, + 336, + 863, + 401 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "4. Choose an appropriate translation", + "text_level": 1, + "bbox": [ + 616, + 401, + 823, + 410 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "strategy: For this type of technical information, I would likely use a combination of literal translation and necessary modifications to fit Chinese syntax and readability. This might involve reordering words or phrases to sound more natural in Chinese while still maintaining the original meaning.", + "bbox": [ + 616, + 410, + 858, + 475 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5. Translate while considering the", + "text_level": 1, + "bbox": [ + 616, + 476, + 811, + 483 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "audience: Since this translation is presumably for a Chinese-speaking audience, I would ensure that the translation aligns with how Chinese speakers might express similar ideas, using idiomatic expressions and terminology that would be familiar to them.", + "bbox": [ + 616, + 483, + 862, + 539 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6. Account for register and tone: Depending on the intended use of the translation (e.g., technical manual, news report, etc.), I would adjust the formality and wording to match the appropriate register. For a technical report, the language would be more formal and precise.", + "bbox": [ + 616, + 541, + 863, + 598 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "7. Translate numbers and measurements: I would carefully translate the numbers and measurements given (412.5 meters, 12.9 meters, 15,000 tons) to their equivalent in Chinese, ensuring that the units of measurement are accurately reflected in the translation (e.g., using \"米\" for meters and \"吨\" for tons).", + "bbox": [ + 616, + 599, + 863, + 663 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "8. Ensure coherence and flow in the translation: After translating each part of the sentence, I would review and refine the translation to ensure that it flows naturally in Chinese, making any necessary adjustments to grammar, syntax, and phrasing to create a smooth and coherent translation.", + "bbox": [ + 616, + 664, + 863, + 728 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "9. Review and make final adjustments:", + "text_level": 1, + "bbox": [ + 616, + 730, + 826, + 738 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Finally, I would review the entire translated sentence for accuracy, fluency, and naturalness in Chinese. 
I would make any necessary adjustments to improve the translation, checking again against the original text to ensure that all information is preserved and that there are no errors or misunderstandings.", + "bbox": [ + 616, + 739, + 860, + 803 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "By following these steps, I can provide a translation that is accurate, faithful to the original text, and reads naturally in Chinese.", + "bbox": [ + 616, + 810, + 852, + 843 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "将总长412.5米、宽12.9米", + "bbox": [ + 616, + 852, + 847, + 860 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "总重15000吨的连续梁分成96节。", + "bbox": [ + 616, + 860, + 831, + 868 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 12: Qualitative examples (I-V) showcasing the diverse thinking patterns generated by MT-R1-Zero models.", + "bbox": [ + 112, + 883, + 880, + 897 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Ⅲ", + "text_level": 1, + "bbox": [ + 833, + 186, + 857, + 200 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 489, + 942, + 507, + 954 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_model.json b/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2be9f7b5236fb7fd6d43020bb255c170134d2afe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_model.json @@ -0,0 +1,4218 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.309, + 0.061, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.10160v1 [cs.CL] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.177, + 0.09, + 0.823, + 0.131 + ], + "angle": 0, + "content": "MT-R1-Zero: Advancing LLM-based Machine Translation via R1-Zero-like Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.783, + 0.165 + ], + "angle": 0, + "content": "Zhaopeng Feng\\(^{1}\\) Shaosheng Cao\\(^{2\\dagger}\\) Jiahan Ren\\(^{1}\\) Jiayuan Su\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.165, + 0.844, + 0.181 + ], + "angle": 0, + "content": "Ruizhe Chen\\(^{1}\\) Yan Zhang\\(^{1}\\) Zhe Xu\\(^{2}\\) Yao Hu\\(^{2}\\) Jian Wu\\(^{1}\\) Zuozhu Liu\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.325, + 0.181, + 0.671, + 0.198 + ], + "angle": 0, + "content": "\\(^{1}\\)Zhejiang University \\(^{2}\\)Xiaohongshu Inc." + }, + { + "type": "text", + "bbox": [ + 0.303, + 0.199, + 0.697, + 0.214 + ], + "angle": 0, + "content": "{zhaopeng.23,zuozhuliu}@intl.zju.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.283, + 0.215, + 0.719, + 0.23 + ], + "angle": 0, + "content": "{caoshaosheng,qiete,xiahou}@xiaohongshu.com" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.277 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.289, + 0.461, + 0.801 + ], + "angle": 0, + "content": "Large-scale reinforcement learning (RL) methods have proven highly effective in enhancing the reasoning abilities of large language models (LLMs), particularly for tasks with verifiable solutions such as mathematics and coding. However, applying this idea to machine translation (MT), where outputs are flexibly formatted and difficult to automatically evaluate with explicit rules, remains underexplored. 
In this work, we introduce MT-R1-Zero, the first open-source adaptation of the R1-Zero RL framework for MT without supervised fine-tuning or cold-start. We propose a rule-metric mixed reward mechanism to guide LLMs towards improved translation quality via emergent reasoning. On the WMT 24 English-Chinese benchmark, our MT-R1-Zero3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points. Meanwhile, our MT-R1-Zero7B-Mix attains a high average score of 62.25 across all metrics, placing it on par with advanced proprietary models such as GPT-4o and Claude-3.5-Sonnet, while the MT-R1-Zero7B-Sem variant achieves state-of-the-art scores on semantic metrics. Moreover, our work exhibits strong generalization capabilities on out-of-distribution MT tasks, robustly supporting multilingual and low-resource settings. Extensive analysis of model behavior across different initializations and reward metrics offers pioneering insight into the critical role of reward design, LLM adaptability, training dynamics, and emergent reasoning patterns within the R1-Zero paradigm for MT. Our code is available at https://github.com/fzp0424/MT-R1-Zero." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.825, + 0.26, + 0.84 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.851, + 0.49, + 0.898 + ], + "angle": 0, + "content": "Large-scale Reinforcement Learning (RL) has empowered Large Language Models (LLMs) with strong reasoning capabilities (OpenAI, 2024; Team," + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.26, + 0.875, + 0.463 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.474, + 0.886, + 0.532 + ], + "angle": 0, + "content": "Figure 1: Performance comparison of contemporary LLM-based translation systems on the WMT 24 EN-ZH test set, plotted by average score across BLEU, COMETKiwi, and XCOMET versus model size." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.55, + 0.885, + 0.791 + ], + "angle": 0, + "content": "2025a,b), demonstrating significant success in tasks such as mathematical reasoning or coding in which answers can be clearly verified. In particular, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) introduced a pure rule-based RL approach that directly fosters emergent reasoning ability without requirements on structured Chain-of-Thought (CoT) data (Wei et al., 2022; Cui et al., 2025) or sophisticated techniques such as Monte Carlo Tree Search (MCTS) (Silver et al., 2016; Luo et al., 2024; Qi et al., 2024; Guan et al., 2025). However, the applicability of these methods to machine translation (MT) remains challenging and underexplored, as MT outputs are flexibly generated and hard to evaluate automatically with explicit rules." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.794, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Recent work has launched attempts to empower LLMs for MT with reasoning capabilities (Chen et al., 2025; Liu et al., 2025). Early studies investigate explicit reasoning methods for improved translation, such as finetuning with CoT (Wang et al., 2024a) or MCTS (Zhao et al., 2024), where advanced multi-step pipelines with self-correction or long-thought agentic mechanisms are further ex" + }, + { + "type": "page_footnote", + "bbox": [ + 0.142, + 0.907, + 0.296, + 0.922 + ], + "angle": 0, + "content": "† Corresponding author." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.504, + 0.955 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.295 + ], + "angle": 0, + "content": "plored (Feng et al., 2024b; Wang et al., 2024b,a). Another line of work leverages RL to empower LLMs for MT through process reward models or supervised finetuning (SFT) with manually annotated CoT data (Feng et al., 2025; He et al., 2025). However, these methods often depend on manually designed or synthetically generated structured CoT data, rely on complex search algorithms, or require explicit multi-stage prompting, leaving the potential of pure RL-based approaches largely unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art (SoTA) open-source or proprietary models." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.296, + 0.492, + 0.55 + ], + "angle": 0, + "content": "Developing pure RL methods to directly enhance the reasoning ability of LLMs for better translation requires answering three key questions: 1) Feasibility: How to design R1-Zero-like RL pipelines with effective reward signals to directly solve MT tasks without binary rule-based rewards; 2) Reasoning capability: Could pure RL training cultivate emergent reasoning abilities and induce models to generate explicit thinking patterns for MT, such as multi-step CoT or verification/reflection; 3) Generalizability: Could the training paradigm generalize across different models (e.g., pre-trained base models, instruction-tuned models, or models pretrained on translation data) or diverse downstream settings (e.g., out-of-distribution, multilingual or low-resource scenarios)." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.552, + 0.492, + 0.922 + ], + "angle": 0, + "content": "In this work, we introduce MT-R1-Zero, the first open-source implementation that extends the RL-Zero-like RL training paradigm to MT. We propose a rule-metric mixed reward mechanism that adapts the original rule-based reward concept to effectively guide training in MT scenarios. We explore different rewards optimizing over lexical (Lex), semantic (Sem), and Lex-Sem mixed (Mix) objectives to guide LLMs towards improved translation quality via emergent reasoning. Our experiments demonstrate the efficacy of this approach: as RL training progresses, our MT-R1-Zero-3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points across all metrics (BLEU, COMETKiwi, XCOMET) on the WMT 24 English-Chinese (EN-ZH) benchmark. Meanwhile, our MT-R1-Zero-7B-Mix surpasses LLaMA-3.1-70B by an average of 1.24 points and Qwen2.5-72B by 0.48 points, even on par with top proprietary models such as GPT-4o and Claude-3.5-Sonnet. The MT-R1-Zero further demonstrate promising generalizability across multilingual and low-resource settings." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.887, + 0.326 + ], + "angle": 0, + "content": "Extensive experiments further provide key findings and insight into the adaptation of R1-Zero paradigm to MT. First, we empirically demonstrate that the choice of metric reward plays a pivotal role in steering RL optimization and translation style (semantic or lexical) (Finding 1). Further analysis reveals that MT-R1-Zero induces diverse emergent reasoning patterns, including dynamic language-of-thought transition during translation (Findings 2 and 3). 
We also identify distinct RL adaptability of different base LLMs (Finding 4). Ablation studies suggest that the pure RL process alone can lead to substantial translation improvements, independent of thinking morbidity (Section 6). Our core contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.337, + 0.887, + 0.402 + ], + "angle": 0, + "content": "- We present the first open-source implementation of the DeepSeek-R1-Zero paradigm for MT, achieving superior performance across indomain, OOD and generalization MT tasks." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.411, + 0.887, + 0.475 + ], + "angle": 0, + "content": "- Our analysis reveals key findings and recipes for effective R1-Zero adaptation to MT, including reward metric selection, emergent reasoning patterns, training dynamics and LLM adaptability." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.485, + 0.887, + 0.583 + ], + "angle": 0, + "content": "- Extensive experiments and ablations show that pure RL serves as the primary driver of MT improvements, with minimal dependence on forced reasoning or output length, highlighting the significant potential of RL for diverse translation applications and broader language tasks." + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.337, + 0.887, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.591, + 0.666, + 0.607 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.616, + 0.886, + 0.923 + ], + "angle": 0, + "content": "LLM Reasoning with Post-training. Recent research indicates that scaling test-time computation can significantly enhance the ability of LLMs to tackle complex reasoning tasks (OpenAI, 2024; Zeng et al., 2024; Xiang et al., 2025). Many approaches rely on sophisticated techniques such as step-level process reward models (PRMs) that provide granular feedback (Lightman et al., 2024; Yuan et al., 2024; Snell et al., 2024) or MCTS to explore potential reasoning paths (Feng et al., 2023; Qi et al., 2024; Guan et al., 2025). A recent alternative, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025), demonstrated that large-scale pure RL, guided only by formatting rules and correctness of final predictions (rule-based reward), can motivate LLMs to develop self-emergent reasoning processes for complex reasoning tasks. Subsequent work (Hu et al., 2025; Face, 2025) successfully replicated this training paradigm in open-source" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.246 + ], + "angle": 0, + "content": "models, focusing on mathematical domains. Xie et al. (2025) further demonstrated the effectiveness and generalization capabilities of the R1-Zero paradigm using logic reasoning game problems, while Huang et al. (2025) explored its potential for vision reasoning. Despite its potential, the application of the R1-Zero RL paradigm to complex generation tasks like MT, in which the accuracy/quality of outputs is not rule-based and difficult to validate automatically, remains an open question." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.248, + 0.49, + 0.617 + ], + "angle": 0, + "content": "LLM Reasoning for MT. Leveraging reasoning to improve MT has garnered increasing attention, as systematically explored in Chen et al. (2025) and Liu et al. (2025). 
Previous work have designed multi-step processes for MT, e.g., Feng et al. (2024b) introduced an API-based self-correcting framework, and Wang et al. (2024b) employed multi-task training followed by a multistage inference phase. Wang et al. (2024a) integrated a similar procedure into inference-time CoT, using a multi-agent mechanism to synthesize long CoT prompts for English-Chinese literary translation. Efforts have also focused on reward modeling for MT reasoning. Feng et al. (2025) constructed implicit process reward models for translation and explored their effectiveness when combined with test-time search. Recent study further evaluated explicit reasoning for MT using CoT fine-tuning and MCTS to expand test-time computation (Zhao et al., 2024). He et al. (2025) demonstrated that models can acquire reasoning-based translation capabilities through multi-stage training with manually constructed CoT templates." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.619, + 0.49, + 0.764 + ], + "angle": 0, + "content": "However, these existing methods often necessitate manually designed or synthetically generated structured CoT data, rely on complex search algorithms (MCTS), or require explicit multi-stage prompting (self-correction). The effectiveness of large-scale pure RL training paradigms such as R1-Zero remains unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art open-source or proprietary models." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.78, + 0.219, + 0.795 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.49, + 0.922 + ], + "angle": 0, + "content": "In this section, we present our method that trains a translation model with pure RL using a hybrid reward model. Unlike tasks with fixed correct answers, translation allows for multiple valid outputs, making the evaluation more complicated. In this work, we introduce a rule-metric mixed reward that integrates reasoning format checking with multi-" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.15 + ], + "angle": 0, + "content": "ple translation quality assessment metrics, which is used within the Group Relative Policy Optimization (GRPO) (Shao et al., 2024) algorithm to ensure stable and efficient RL training." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.161, + 0.774, + 0.175 + ], + "angle": 0, + "content": "3.1 Rule-Metric Mixed Reward" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.182, + 0.885, + 0.504 + ], + "angle": 0, + "content": "In RL, the reward is the main signal that drives model training. DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) employs simple rule-based rewards that check whether the final answer is correct and whether the response follows a specific format. This works well for tasks with fixed format correct answers such as math or coding. However, there is often no single \"correct\" output for MT, impeding the design of rule-based rewards. Fortunately, the MT community has developed many evaluation metrics to measure translation quality. Recent advancements in automated MT evaluation metrics have shown promise in aligning automated assessments with human translation quality judgments (Freitag et al., 2022, 2023). Thus, we design a rule-metric mixed reward, which consists of two parts: a Format Reward that checks output structure, and a Metric Reward that evaluates translation quality. 
We use a structured prompt template similar to that in DeepSeek-R1-Zero:" + }, + { + "type": "title", + "bbox": [ + 0.537, + 0.516, + 0.746, + 0.531 + ], + "angle": 0, + "content": "Template for MT-R1-Zero" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.542, + 0.86, + 0.768 + ], + "angle": 0, + "content": "A conversation between User and Assistant. The User asks for a translation from {src_language} to {tgt_language}, and the Assistant solves it. The Assistant first thinks about the reasoning process in the mind and then provides the user with the final translation. The reasoning process and final translation are enclosed within and tags, respectively, i.e., reasoning process here final translation here . \nUser:{src_text} \nAssistant:" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.784, + 0.885, + 0.832 + ], + "angle": 0, + "content": "Here, src_language and tgt_language indicate the source and target languages, and src_text denotes the source text requiring translation." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.886, + 0.921 + ], + "angle": 0, + "content": "Format Reward: We use regular expression extraction to enforce a structured response format. The model is required to place its reasoning process within tags and provide the final translation inside " + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.954 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.117 + ], + "angle": 0, + "content": "tags. The format reward score \\((S_{\\text{format}})\\) is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.14, + 0.449, + 0.182 + ], + "angle": 0, + "content": "\\[\nS _ {f o r m a t} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f f o r m a t i s c o r r e c t} \\\\ - 1, & \\text {i f f o r m a t i s i n c o r r e c t} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.191, + 0.49, + 0.273 + ], + "angle": 0, + "content": "Metric Reward: This reward evaluates the quality of model's translation, but only if the response format is correct. We use automatic evaluation metrics to calculate a translation quality score \\( S_{\\text{metric}} \\). We explore three approaches to compute \\( S_{\\text{metric}} \\):" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.282, + 0.49, + 0.41 + ], + "angle": 0, + "content": "1. N-gram Lexical Matching Reward (RewardLex): Metrics such as BLEU (Papineni et al., 2002) orchrF (Popovic, 2015) evaluate translation quality by measuring the difference (primarily lexical overlap) between the translation and the human-written reference. In our experiments, we employ the BLEU score calculated via the sacrebleu\\(^{1}\\)." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.422, + 0.49, + 0.597 + ], + "angle": 0, + "content": "2. Semantic and Contextual Reward (Reward-Sem): Learning-based metrics like COMET (Rei et al., 2020) and COMETKiwi (Rei et al., 2022) are trained on human judgments (e.g., MQM quality assessments (Freitag et al., 2021)). These metrics can recognize good translations even if the wording differs from the reference, as long as the meaning is preserved. We use the COMETKiwi- \\(23^{2}\\), which was used in the WMT 24 (Kocmi et al., 2024) and only needs the source sentence and the model's translation." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.609, + 0.489, + 0.705 + ], + "angle": 0, + "content": "3. 
Lexical and Semantic Mixed Reward (Reward-Mix): To capture both lexical fidelity and semantic adequacy, we use a hybrid reward (Reward-Mix) that adds together Lexical Matching Reward (Reward-Lex) and Semantic and Contextual Reward (Reward-Sem)." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.282, + 0.49, + 0.705 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.717, + 0.488, + 0.749 + ], + "angle": 0, + "content": "Accordingly, the computation of \\( S_{\\text{metric}} \\) depends on the selected reward configuration:" + }, + { + "type": "equation", + "bbox": [ + 0.115, + 0.759, + 0.504, + 0.803 + ], + "angle": 0, + "content": "\\[\nS _ {m e t r i c} = \\left\\{ \\begin{array}{l l} \\mathrm {B} (\\text {t r a n s}, \\text {r e f}), & \\text {i f R e w a r d - L e x} \\\\ \\mathrm {C K} (\\text {s r c}, \\text {t r a n s}) & \\text {i f R e w a r d - S e m} \\\\ \\mathrm {B} (\\text {t r a n s}, \\text {r e f}) + \\mathrm {C K} (\\text {s r c}, \\text {t r a n s}), & \\text {i f R e w a r d - M i x} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.813, + 0.49, + 0.876 + ], + "angle": 0, + "content": "where B denotes normalized BLEU score, CK denotes the COMETKiwi score, trans is the generated translation, ref is the reference translation, and src is the source text." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.882, + 0.483, + 0.92 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/mjpost/sacrebleu \n\\(^{2}\\)https://huggingface.co/Unbabel/wmt23-cometkiwi-da-xl" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.883, + 0.15 + ], + "angle": 0, + "content": "Rule-Metric Mixed Reward: The final reward \\( r \\) combines both the format reward (\\( S_{\\text{format}} \\)) and the metric reward (\\( S_{\\text{metric}} \\)). Formally, it is calculated using the following rule:" + }, + { + "type": "equation", + "bbox": [ + 0.526, + 0.171, + 0.863, + 0.213 + ], + "angle": 0, + "content": "\\[\nr = \\left\\{ \\begin{array}{l l} S _ {f o r m a t} - 2, & \\text {i f} S _ {f o r m a t} = - 1 \\\\ S _ {f o r m a t} + S _ {m e t r i c}, & \\text {i f} S _ {f o r m a t} = 1 \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.221, + 0.885, + 0.479 + ], + "angle": 0, + "content": "where \\( S_{\\text{metric}} \\) is calculated only if the response format is correct. \\( S_{\\text{format}} = 1 \\). If the format is incorrect (\\( S_{\\text{format}} = -1 \\)), we skip the metric reward evaluation and assign a fixed penalty (e.g., 2) to discourage format violations. This setup encourages the model to first learn the correct output structure. When the format is correct, the final reward becomes \\( r = 1 + S_{\\text{metric}} \\). Unlike traditional rule-based rewards that give a fixed score for correct outputs, our approach uses a continuous metric score. This means the reward can vary within the [1, 2] or [1, 3] range, depending on translation quality. As a result, the model receives more detailed feedback and can learn to improve even small differences in translation quality across correctly formatted outputs." 
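The rule-metric mixed reward described above combines a binary format check with a continuous quality score. The following Python sketch illustrates one way such a reward could be implemented; it is a minimal illustration, not the paper's code. The `<think>`/`<translate>` tag names (stripped from the extracted template text), the exact regular expression, and the stubbed COMETKiwi call are assumptions; `sacrebleu.sentence_bleu` is a real API, and BLEU is normalized to [0, 1] so that rewards fall in the [1, 2] / [1, 3] ranges reported above.

```python
import re
from sacrebleu import sentence_bleu

# Sketch of the rule-metric mixed reward (Section 3.1). Tag names and the
# COMETKiwi stub are assumptions; penalties follow the equations in the text.
FORMAT_RE = re.compile(
    r"^\s*<think>(?P<think>.+?)</think>\s*<translate>(?P<trans>.+?)</translate>\s*$",
    re.DOTALL,
)

def format_reward(response: str):
    """Return (S_format, extracted translation or None)."""
    m = FORMAT_RE.match(response)
    if m is None:
        return -1, None
    return 1, m.group("trans").strip()

def bleu_norm(trans: str, ref: str) -> float:
    # sacrebleu returns BLEU in [0, 100]; normalize to [0, 1].
    return sentence_bleu(trans, [ref]).score / 100.0

def cometkiwi(src: str, trans: str) -> float:
    # Placeholder for a reference-free COMETKiwi scorer
    # (e.g. wmt23-cometkiwi-da-xl), assumed to return a score in [0, 1].
    raise NotImplementedError

def mixed_reward(response: str, src: str, ref: str, mode: str = "mix") -> float:
    s_format, trans = format_reward(response)
    if s_format == -1:
        return s_format - 2            # fixed penalty for malformed outputs (r = -3)
    if mode == "lex":                  # Reward-Lex
        s_metric = bleu_norm(trans, ref)
    elif mode == "sem":                # Reward-Sem
        s_metric = cometkiwi(src, trans)
    else:                              # Reward-Mix: lexical + semantic
        s_metric = bleu_norm(trans, ref) + cometkiwi(src, trans)
    return s_format + s_metric         # in [1, 2] (Lex/Sem) or [1, 3] (Mix)
```

Under this scheme a correctly formatted output earns 1 + S_metric, so even small quality differences between well-formed candidates still change the reward.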
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.489, + 0.669, + 0.505 + ], + "angle": 0, + "content": "3.2 RL Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.51, + 0.884, + 0.673 + ], + "angle": 0, + "content": "We use the Group Relative Policy Optimization (GRPO) algorithm (Shao et al., 2024) to train the translation model with our rule-metric mixed reward. In each training step, for a given translational question \\( q \\), we sample a group of candidate outputs \\( \\{o_1, o_2, \\dots, o_G\\} \\) from the policy model \\( \\pi_{\\theta_{old}} \\). \\( A_i = \\frac{r_i - \\mathrm{mean}(\\{r_1, r_2, \\dots, r_G\\})}{\\mathrm{std}(\\{r_1, r_2, \\dots, r_G\\})} \\) is the computed advantage using the group rule-metric mixed rewards \\( \\{r_1, r_2, \\dots, r_G\\} \\). GRPO then maximizes the following objective function to optimize \\( \\pi_\\theta \\):" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.682, + 0.883, + 0.841 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} J _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (O | q)} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} \\mid q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} \\mid q)} A _ {i}, \\right. \\right. \\\\ \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (o _ {i} \\mid q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} \\mid q)}, 1 - \\varepsilon , 1 + \\varepsilon\\right) A _ {i}\\left. \\right) \\\\ \\left. - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right) \\right], \\tag {1} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "where \\(\\varepsilon\\) and \\(\\beta\\) are hyperparameters controlling the PPO clipping threshold and the weight of the Kullback-Leibler (KL) divergence penalty (Schulman et al., 2017; Shao et al., 2024), respectively. Specifically, \\(\\varepsilon\\) determines the permissible range for policy" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.49, + 0.189 + ], + "angle": 0, + "content": "updates, while \\(\\beta\\) regulates the magnitude of the KL penalty during training to prevent excessive policy shifts from the reference policy \\(\\pi_{ref}\\) (typically the initialization of \\(\\pi_{\\theta}\\)). \\(D_{KL}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}}) = \\frac{\\pi_{\\mathrm{ref}}(o_i|q)}{\\pi_{\\theta}(o_i|q)} - \\log \\left(\\frac{\\pi_{\\mathrm{ref}}(o_i|q)}{\\pi_{\\theta}(o_i|q)}\\right) - 1\\) is the KL divergence approximation term." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.202, + 0.262, + 0.218 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.229, + 0.318, + 0.244 + ], + "angle": 0, + "content": "4.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.25, + 0.49, + 0.523 + ], + "angle": 0, + "content": "Dataset and Benchmarks. Our primary experimental focus is on English (EN) and Chinese (ZH). Following Xu et al. (2023) and Feng et al. (2024a), we collect parallel examples \\((\\mathrm{EN} \\rightleftharpoons \\mathrm{ZH})\\) sourced from WMT 2017 through WMT 2020. 
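For reference, the group-relative advantage and clipped surrogate of Eq. (1) can be sketched as below. This is a simplified, sequence-level illustration under stated assumptions: it omits the KL penalty term (the training details later set beta to 0) and ignores the token-level bookkeeping of the actual verl-based implementation.

```python
from typing import List
import math

def group_advantages(rewards: List[float], eps: float = 1e-8) -> List[float]:
    """Group-relative advantages in GRPO: standardize the rule-metric mixed
    rewards within the group of G rollouts sampled for one source sentence."""
    g = len(rewards)
    mean = sum(rewards) / g
    std = math.sqrt(sum((r - mean) ** 2 for r in rewards) / g) + eps
    return [(r - mean) / std for r in rewards]

def grpo_surrogate(logp_new: List[float], logp_old: List[float],
                   advantages: List[float], clip_eps: float = 0.2) -> float:
    """Clipped policy-gradient surrogate for one group of rollouts
    (KL penalty omitted; assumes per-sequence log-probabilities)."""
    total = 0.0
    for lp_new, lp_old, adv in zip(logp_new, logp_old, advantages):
        ratio = math.exp(lp_new - lp_old)
        clipped = max(min(ratio, 1.0 + clip_eps), 1.0 - clip_eps)
        total += min(ratio * adv, clipped * adv)
    return total / len(advantages)     # quantity to be maximized
```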
We apply a filter to exclude sentences containing fewer than 30 characters, leading to a final training set of 13,130 examples. For evaluation, we assess performance on two in-domain translation tasks using recent WMT benchmarks: EN-ZH (WMT \\(24^{3}\\)) and ZHEN (WMT \\(23^{4}\\)). Additionally, we evaluate generalization capabilities on three out-of-distribution (OOD) translation directions: English-Japanese (EN-JA, WMT 2024), German-English (DE-EN, WMT 2023 Document-level), and German-Chinese (DE-ZH, Flores-200 (Costa-jussa et al., 2022)). Detailed statistics are presented in Table 8." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.524, + 0.49, + 0.685 + ], + "angle": 0, + "content": "Baselines. Our primary baselines encompass leading proprietary models, namely Claude-3.5-Sonnet (Anthropic, 2024), GPT-4o (OpenAI, 2023), and Gemini-1.5-Pro (Team et al., 2024), alongside advanced open-source models such as the Qwen2.5 series (Yang et al., 2024), LLaMA-3.1 series (Grattafori et al., 2024), and the translation-specific Tower family (Alves et al., 2024). Proprietary models were accessed via their APIs5. More evaluation details can be found in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.686, + 0.49, + 0.831 + ], + "angle": 0, + "content": "Evaluation Metrics. We assess translation quality using a suite of three complementary metrics: the lexical metric BLEU (Post, 2018), the reference-free learning-based metric COMETKiwi (Rei et al., 2022) (COMETKiwi-23-XL), and the reference-based learning-based metric XCOMET (Guerreiro et al., 2024) (XCOMET-XL). Together, these metrics provide a comprehensive view by evaluating both lexical fidelity and semantic adequacy." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.832, + 0.488, + 0.847 + ], + "angle": 0, + "content": "Training Details. Our implementation is based on" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.084, + 0.885, + 0.358 + ], + "angle": 0, + "content": "the verl\\(^{6}\\) framework. We selected the Qwen2.5-base series (3B and 7B parameter variants) as starting models for MT-R1-Zero training. During training, we configure a batch size of 8 and utilize 8 rollouts per prompt within the GRPO algorithm. We employ a constant learning rate of 5e-7 and set the sampling temperature to 1.0. The maximum generation length for responses is capped at 1024 tokens. We set the KL penalty coefficient \\(\\beta\\) to 0, thereby removing the KL constraint against the reference policy. This decision stems from our empirical observation that the KL penalty tends to restrict the model's exploration of diverse response lengths, which we will discuss further in Section 6.1. The PPO clipping range \\(\\epsilon\\) is set to 0.2. All models are trained for 1 epoch on 4 NVIDIA H800 80G GPUs for about 13 hours." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.373, + 0.662, + 0.387 + ], + "angle": 0, + "content": "4.2 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.395, + 0.885, + 0.814 + ], + "angle": 0, + "content": "In-Domain Performance. Our models show substantial gains over their corresponding base versions, and exhibit competing performance compared to existing SoTA benchmarks (Table 1). On the EN-ZH direction, our MT-R1-Zero-7B-Mix on the average score (62.25) also surpasses GPT-4o (61.86) and Qwen2.5-72B (61.77). In addition, the MT-R1-Zero-7B-Sem achieves the best semantic-level performance on EN-ZH, scoring 72.07 on COMETKiwi and 79.37 on XCOMET. 
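The data filter and GRPO hyperparameters reported in the experimental setup can be summarized as a hypothetical configuration sketch; the dictionary keys below are illustrative and do not correspond to verl's actual configuration schema, and whether the 30-character filter is applied to the source side, the target side, or both is an assumption here.

```python
# Illustrative restatement of the Section 4.1 setup (not the paper's config files).

def keep_example(src: str, tgt: str, min_chars: int = 30) -> bool:
    """Drop short parallel sentences, mirroring the <30-character filter
    (applied to both sides here for illustration)."""
    return len(src) >= min_chars and len(tgt) >= min_chars

TRAIN_CONFIG = {
    "base_models": ["Qwen2.5-3B-Base", "Qwen2.5-7B-Base"],
    "batch_size": 8,
    "rollouts_per_prompt": 8,        # G in GRPO
    "learning_rate": 5e-7,
    "sampling_temperature": 1.0,
    "max_response_tokens": 1024,
    "kl_coefficient_beta": 0.0,      # KL constraint removed
    "ppo_clip_epsilon": 0.2,
    "epochs": 1,
}
```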
This surpasses the strongest proprietary model, Claude3.5-Sonnet, by 1.68 COMETKiwi points and exceeds the best listed open-source model, Qwen2.5-72B, by more than 3 points. On the ZH-EN direction, MT-R1-Zero-7B-Mix is also highly competitive. Our MT-R1-Zero-7B-Sem achieves a COMETKiwi score of 71.66, which is comparable to the top closed models (Claude-3.5-Sonnet 71.69, GPT-4o 71.63) and surpasses strong open-source models such as LLaMA-3.1-70B (70.43) and Qwen2.5-72B (70.95). Furthermore, the MT-R1-Zero-3B-Sem delivers impressive performance for its scale. It scores 69.75 COMETKiwi on EN-ZH, which is approximately 1.7 points higher than the much larger LLaMA-3.1-70B and over 0.7 points above Qwen2.5-72B." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.815, + 0.885, + 0.895 + ], + "angle": 0, + "content": "Out-of-Distribution Performance. Table 2 reports the XCOMET of our models on OOD language pairs with a zero-shot setting (models trained only on EN-ZH/ZH-EN). Despite this challenging setup, our models exhibit strong generaliza" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.857, + 0.47, + 0.87 + ], + "angle": 0, + "content": "3https://www2.statmt.org/wmt24/translation-task.html" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.871, + 0.468, + 0.883 + ], + "angle": 0, + "content": "4https://www2.statmt.org/wmt23/translation-task.html" + }, + { + "type": "page_footnote", + "bbox": [ + 0.116, + 0.884, + 0.487, + 0.921 + ], + "angle": 0, + "content": "5The specific proprietary models accessed include Anthropic's claude-3-5-sonnet-20241022, OpenAI's gpt-4o-2024-08-06, and Google's gemini-1.5-pro." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.857, + 0.487, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.907, + 0.751, + 0.921 + ], + "angle": 0, + "content": "\\(^{6}\\)https://github.com/volcengine/verl" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.954 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.118, + 0.082, + 0.88, + 0.4 + ], + "angle": 0, + "content": "
MODELZH-ENEN-ZH
BLEUCOMETKiwiXCOMETAvg.BLEUCOMETKiwiXCOMETAvg.
Closed
Claude-3.5-Sonnet (2024/10)22.5571.6987.3260.5238.6370.3978.2462.42
GPT-4o (2024/08)22.5771.6387.2260.4741.1369.0175.4361.86
Gemini-1.5-Pro (2025/03)18.3469.2385.5557.7139.8267.4776.2661.18
Open
General Purpose LLMs
LLaMA-3.1-70B-Instruct25.1970.4386.2160.6139.8268.0575.1761.01
Qwen2.5-72B-Instruct21.9670.9587.0759.9939.2969.0476.9761.77
Qwen2.5-32B-Instruct20.5469.3585.4758.4536.3668.4374.9059.90
Translation-Specific LLMs
TowerInstruct-13B-v0.124.7270.1785.6960.1937.0666.2273.1358.80
TowerInstruct-7B-v0.223.3269.9984.9359.4134.9364.0470.6756.55
Ours
Qwen2.5-3B-Base14.2664.8676.7651.9615.9052.0567.1345.03
MT-R1-Zero-3B-Lex21.5366.3381.6956.5233.7060.5865.6753.32
MT-R1-Zero-3B-Sem18.4170.3385.9858.2424.3269.7576.9257.00
MT-R1-Zero-3B-Mix22.5468.8484.0858.4936.2765.0572.1057.81
Qwen2.5-7B-Base18.2368.2784.9957.1631.1463.3869.8354.78
MT-R1-Zero-7B-Lex23.5665.3582.1257.0140.1164.5770.2158.30
MT-R1-Zero-7B-Sem16.6271.6686.0758.1223.0772.0779.3758.17
MT-R1-Zero-7B-Mix23.9870.8186.1760.3240.9769.4376.3662.25
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.408, + 0.884, + 0.466 + ], + "angle": 0, + "content": "Table 1: Performance comparison on in-domain translation directions (EN-ZH, ZH-EN) using BLEU, COMETKiwi, and XCOMET metrics, with average metric scores (Avg.). MT-R1-Zero variants (-Lex, -Sem, -Mix) are compared against closed and open baselines, which are further categorized by accessibility and specialization. The -Mix variant often achieves the best balance, while -Sem reaches peak semantic scores." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.488, + 0.484, + 0.629 + ], + "angle": 0, + "content": "
MODELOUT-OF-DISTRIBUTION
EN-JADE-EN (DOC)DE-ZHAvg.
Strong Baseline
Qwen2.5-72B-Instruct76.8689.5188.4284.93
LLaMA3.1-70B-Instruct75.6788.7287.4283.94
Same-size Baseline
Qwen2.5-7B-Instruct63.7487.4584.4378.54
LLaMA-3.1-8B-Instruct64.5086.8482.2377.86
TowerInstruct-7B-v0.256.7389.4784.2876.83
MT-R1-Zero-7B-Lex60.6585.2583.8676.59
MT-R1-Zero-7B-Sem71.9587.6887.6682.43
MT-R1-Zero-7B-Mix68.4988.6988.6981.96
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.639, + 0.489, + 0.682 + ], + "angle": 0, + "content": "Table 2: Out-of-distribution performance comparison using the XCOMET metric on EN-JA, DE-EN (Document-level), and DE-ZH." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.712, + 0.49, + 0.922 + ], + "angle": 0, + "content": "tion. The MT-R1-Zero-7B-Sem achieves the highest average XCOMET score (82.43) across the OOD tasks, reaching top scores on EN-JA (71.95) and DE-EN (87.68). The MT-R1-Zero-7B-Mix also demonstrates highly competitive generalization with an average score of 81.96, and secures the highest score on DE-ZH (88.69). While these variants do not consistently surpass the much larger strong baselines (Qwen2.5-72B Avg. 84.93, LLaMA3.1-70B Avg. 83.94), they are still highly competitive. Crucially, MT-R1-Zero-7B-Sem and -Mix significantly outperform all same-size baselines (Qwen2.5-7B-Instruct Avg. 78.54, LLaMA" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.491, + 0.885, + 0.604 + ], + "angle": 0, + "content": "3.1-8B-Instruct Avg. 77.86, TowerInstruct-7B-v0.2 Avg. 76.83) by a considerable margin (at least 3.4 points). These OOD results suggest that the quality improvements in MT-R1-Zero can effectively transfer to unseen language pairs. Results using COMETKiwi and BLEU are also provided in Appendix Tables 6 and 7, respectively." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.617, + 0.766, + 0.634 + ], + "angle": 0, + "content": "5 Key Findings and Insight" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.645, + 0.884, + 0.724 + ], + "angle": 0, + "content": "Based on our extensive experiments adapting the R1-Zero paradigm to MT, we identify several key findings regarding the underlying mechanisms, design ideas, and emergent behaviors of our MT-R1-Zero framework." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.738, + 0.833, + 0.754 + ], + "angle": 0, + "content": "5.1 Impact of Reward Metric Selection" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.759, + 0.884, + 0.84 + ], + "angle": 0, + "content": "As detailed in Section 3.1, we explore three metric rewards: Reward-Lex, Reward-Sem, and Reward-Mix. Our results demonstrate that the choice among these significantly affects the learning target and final model outputs, as stated in Finding 1." + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.859, + 0.86, + 0.907 + ], + "angle": 0, + "content": "Finding 1: Reward metric selection critically shapes optimization targets and translation style." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.943, + 0.506, + 0.955 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.082, + 0.382, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.082, + 0.626, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.082, + 0.871, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.251, + 0.885, + 0.279 + ], + "angle": 0, + "content": "Figure 2: Training dynamics using Reward-Lex, Reward-Sem, and Reward-Mix, evaluated with COMETKiwi, BLEU, and XCOMET." 
+ }, + { + "type": "image", + "bbox": [ + 0.126, + 0.283, + 0.877, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.613, + 0.884, + 0.644 + ], + "angle": 0, + "content": "Figure 3: Qualitative examples illustrates the effect of different reward functions (Reward-Lex, Reward-Sem, Reward-Mix) on EN-ZH translation, where the stylistic differences are driven by reward optimization (Finding 1)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.658, + 0.49, + 0.915 + ], + "angle": 0, + "content": "Figure 2 presents the training dynamics with different rewards. Training with Reward-Lex maximizes BLEU scores, often at the expense of semantic scores, while Reward-Sem maximizes COMETKiwi, leading to a decline in BLEU. Training with Reward-Mix improves both metrics, with a trade-off of achieving sub-optimal COMETKiwi compared to Reward-Sem. Independent evaluation with XCOMET further supports this finding, showing consistent improvements for Sem and Mix variants while fluctuating for Lex. This finding aligns with the insight from Chen et al. (2025), suggesting that lexical and semantic assessments are complementary, particularly for reasoning-oriented LLMs, and combining them can offer a more comprehensive evaluation signal." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.658, + 0.885, + 0.881 + ], + "angle": 0, + "content": "Qualitatively (Figure 3), this optimization alignment manifests as distinct translation styles. BLEU optimization encourages literal, n-gram focused translations, potentially sacrificing nuance. COMETKiwi optimization fosters translations that prioritize semantic faithfulness, even if lexically divergent from references. In contrast, the mixed reward yields balanced translations. This demonstrates that the metric reward fundamentally dictates the nature of the translation quality learned (e.g., semantic v.s. lexical). Therefore, careful metric selection and deliberate fusion are essential for tailoring RL-based MT refinement towards specific and desired translations." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.504, + 0.953 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.149, + 0.084, + 0.496, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.083, + 0.848, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.266, + 0.885, + 0.31 + ], + "angle": 0, + "content": "Figure 4: Training dynamics of MT-R1-Zero models (using Reward-Sem). Left: COMETKiwi score progression for 3B and 7B models on EN-ZH and ZH-EN test sets. Right: Average response length changes over training steps, exhibiting the classic decrease-then-increase pattern (Finding 2)." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.322, + 0.429, + 0.334 + ], + "angle": 0, + "content": "Source (ZH):至于电量的详细说明,一般人看着心里会发虚。" + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.319, + 0.799, + 0.338 + ], + "angle": 0, + "content": "Reference (EN):The majority of individuals will feel guilty after reading the in-depth description of the power." 
+ }, + { + "type": "title", + "bbox": [ + 0.472, + 0.348, + 0.523, + 0.36 + ], + "angle": 0, + "content": "Step 0" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.359, + 0.374, + 0.368 + ], + "angle": 0, + "content": "User:至于电量的详细说明,一般人看着心里会发虚。" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.368, + 0.846, + 0.392 + ], + "angle": 0, + "content": "Assistant: To translate the given Chinese sentence into English, I will first break down the sentence into its components and then find the appropriate English equivalents for each component. The sentence is: \"至于电量的详细说明,一般人看着心里会发虚。\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.392, + 0.61, + 0.402 + ], + "angle": 0, + "content": "1. 至于 - This is a preposition that can be translated to \"as for\" or \"regarding.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.402, + 0.539, + 0.411 + ], + "angle": 0, + "content": "2. 电量 - This is a compound noun that means \"electricity\" or \"power.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.411, + 0.599, + 0.418 + ], + "angle": 0, + "content": "3. 的 - This is a possessive particle that indicates possession or relationship." + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.418, + 0.703, + 0.426 + ], + "angle": 0, + "content": "4. 详细说明 - This is a compound noun that means \"detailed explanation\" or \"explanation in detail.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.426, + 0.506, + 0.434 + ], + "angle": 0, + "content": "5. 一般 - This is an adverb that means \"generally\" or \"usually.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.434, + 0.465, + 0.442 + ], + "angle": 0, + "content": "6. 人 - This is a noun that means \"people\" or \"persons.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.442, + 0.531, + 0.451 + ], + "angle": 0, + "content": "7. 看着 - This is a verb phrase that means \"looking at\" or \"seeing.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.451, + 0.604, + 0.459 + ], + "angle": 0, + "content": "8. 心里 - This is a compound noun that means \"in one's heart\" or \"in one's mind.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.459, + 0.476, + 0.467 + ], + "angle": 0, + "content": "9. 会 - This is a modal verb that means \"will\" or \"would.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.467, + 0.593, + 0.476 + ], + "angle": 0, + "content": "10. 发虚 - This is a verb phrase that means \"to feel faint\" or \"to feel dizzy.\"" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.476, + 0.83, + 0.485 + ], + "angle": 0, + "content": "Now, I will combine these components to form the English translation. As for the detailed explanation" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.485, + 0.654, + 0.493 + ], + "angle": 0, + "content": "of electricity, generally speaking, people will feel faint when they see it. " + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.493, + 0.251, + 0.5 + ], + "angle": 0, + "content": "[COMETKiwI:60.11]" + }, + { + "type": "title", + "bbox": [ + 0.265, + 0.51, + 0.332, + 0.522 + ], + "angle": 0, + "content": "Step 400" + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.525, + 0.373, + 0.534 + ], + "angle": 0, + "content": "User:至于电量的详细说明,一般人看着心里会发虚。" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.535, + 0.449, + 0.602 + ], + "angle": 0, + "content": "Assistant: My task is to accurately translate the Chinese text provided by the user into natural and coherent English while preserving the original meaning, tone, and nuances of the Chinese sentence. 
As for the detailed explanation of the battery, most people would feel uneasy just by looking at it. [COMETKwi: 68.44]" + }, + { + "type": "title", + "bbox": [ + 0.62, + 0.51, + 0.694, + 0.522 + ], + "angle": 0, + "content": "Step 1600" + }, + { + "type": "text", + "bbox": [ + 0.465, + 0.524, + 0.694, + 0.533 + ], + "angle": 0, + "content": "User:至于电量的详细说明,一般人看着心里会发虚。" + }, + { + "type": "text", + "bbox": [ + 0.465, + 0.533, + 0.852, + 0.608 + ], + "angle": 0, + "content": "Assistant: I translated the Chinese text \"User:至于电量的详细说明,一般人看着心里会发虚。\" to English as \"As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it.\" The original text quotes the user saying that when people see the detailed information about the battery level, they feel a bit uncomfortable or uncertain about it.translate> \"As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it.\"
[COMETHiwi: 74.89]" + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.625, + 0.885, + 0.668 + ], + "angle": 0, + "content": "Figure 5: Evolution of an MT-R1-Zero model's reasoning process and translation output for the Chinese source text \"其影响可能类似于2008年的经济危机\" at different training steps (0, 400, 1600), showcasing the shift from decomposition to more semantic analysis (Finding 2)." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.683, + 0.479, + 0.714 + ], + "angle": 0, + "content": "5.2 Emergence and Evolution of Translation Thinking Patterns" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.722, + 0.49, + 0.77 + ], + "angle": 0, + "content": "By observing the training process, we provide several insights into model adaptation and the emergence of reasoning." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.789, + 0.465, + 0.837 + ], + "angle": 0, + "content": "Finding 2: Response length initially declines rapidly and then gradually increases as training progresses." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.857, + 0.489, + 0.922 + ], + "angle": 0, + "content": "Figure 4 (Right) depicts the pattern in Finding 2 alongside consistent COMETKiwi improvements (Left). Qualitative analysis (Figure 5) reveals that this length trajectory reflects evolving reasoning" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.683, + 0.885, + 0.795 + ], + "angle": 0, + "content": "strategies. The initial decline corresponds to the model mastering the required format while transitioning from naive decomposition (Step 0) to more efficient, direct translations. The subsequent increase aligns with the development of richer semantic analysis and deeper contextual reasoning within the tags (Step 1600)." + }, + { + "type": "text", + "bbox": [ + 0.535, + 0.826, + 0.859, + 0.907 + ], + "angle": 0, + "content": "Finding 3: Diverse reasoning patterns emerge autonomously, varying in style and complexity, and moreover, the internal reasoning language could dynamically transit to target languages even for OOD settings." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.943, + 0.505, + 0.955 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "aside_text", + "bbox": [ + 0.118, + 0.149, + 0.135, + 0.19 + ], + "angle": 0, + "content": "eep" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.101, + 0.327, + 0.121 + ], + "angle": 0, + "content": "User:A few more sensors in general would be nice I guess." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.127, + 0.334, + 0.241 + ], + "angle": 0, + "content": "Assistant: I will translate the given English sentence into Japanese. The sentence is \"A few more sensors in general would be nice I guess.\" I will break down the sentence into its main components and find the appropriate Japanese words and phrases for each part. \n 一般、少让更多的SENSAがた或良好を意思檔。" + }, + { + "type": "title", + "bbox": [ + 0.427, + 0.087, + 0.471, + 0.098 + ], + "angle": 0, + "content": "EN-ZH" + }, + { + "type": "text", + "bbox": [ + 0.34, + 0.102, + 0.506, + 0.129 + ], + "angle": 0, + "content": "User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool" + }, + { + "type": "text", + "bbox": [ + 0.34, + 0.136, + 0.555, + 0.241 + ], + "angle": 0, + "content": "Assistant: I will translate the given text from English to Chinese. The text contains hashtags and mentions of a pilot training program, flying, a company called Teagan Air, a specific aircraft model (PA28), and a pilot. 
I will translate each part of the text accordingly.\n #飞行员培训 #飞行 #泰根航空 #PA28 #飞行员 #飞行学校 " + }, + { + "type": "title", + "bbox": [ + 0.691, + 0.087, + 0.734, + 0.098 + ], + "angle": 0, + "content": "DE-ZH" + }, + { + "type": "text", + "bbox": [ + 0.561, + 0.102, + 0.866, + 0.129 + ], + "angle": 0, + "content": "User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Nähe eines Anspitzers kommt zu müssen." + }, + { + "type": "text", + "bbox": [ + 0.561, + 0.136, + 0.876, + 0.241 + ], + "angle": 0, + "content": "Assistant: To translate the given German sentence into Chinese, I will first break down the sentence into its main components and then find the appropriate Chinese equivalents for each component. The main components are: \"Heutzutage\" (today), \"schreiben\" (write), \"Menschen\" (people), \"Nachrichten\" (messages), \"auf Computerbildschirmen\" (on computer screens), \"ohne je\" (without ever), \"in die Näre\" (near), \"eines Anspitzers\" (a pencil), and \"kommen zu müssen\" (to have to come). \n 现在,人们可以在电脑屏幕上写信,而无需接近铅笔。" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.255, + 0.326, + 0.274 + ], + "angle": 0, + "content": "User:A few more sensors in general would be nice I guess." + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.28, + 0.327, + 0.394 + ], + "angle": 0, + "content": "Assistant: 我はルーダーから、 [User: あ、一般にルーダーが減少増えたARDS']」う英語の文を翻訹てほしぃと類まんた。その文は、ルーダーがルーダーの数を増やてほしぃと顧ったとを表語言。私はこの文を本日語に翻訹てまん。「あ、一般にルーダーをむう減少増やてむいかな」」う意味です。 「一般に、ルーダーをむう減少増やてむいかな。」" + }, + { + "type": "text", + "bbox": [ + 0.34, + 0.254, + 0.572, + 0.272 + ], + "angle": 0, + "content": "User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool" + }, + { + "type": "text", + "bbox": [ + 0.34, + 0.279, + 0.572, + 0.357 + ], + "angle": 0, + "content": "Assistant: 我在思考如何将这段英文文本“#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool\"翻译成中文。文本中包含多个标签,这些标签可能来自社交媒体帖子或文章,用于描述与飞行员培训、飞行、TeaganAir航空公司、PA28飞机型号、飞行员、飞行学校等相关的内容。翻译时,我会将每个标签逐一翻译成中文,保留其原始含义和用途。翻译后的文本内容如下:" + }, + { + "type": "text", + "bbox": [ + 0.34, + 0.357, + 0.572, + 0.393 + ], + "angle": 0, + "content": "#飞行员培训 #飞行 #泰根航空 #PA28型飞机 #飞行员 #飞行学校 \n#飞行员培训 #飞行 #泰根航空#PA28型飞机 #飞行员 #飞行学校" + }, + { + "type": "text", + "bbox": [ + 0.582, + 0.254, + 0.863, + 0.281 + ], + "angle": 0, + "content": "User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen." + }, + { + "type": "text", + "bbox": [ + 0.582, + 0.288, + 0.873, + 0.386 + ], + "angle": 0, + "content": "Assistant: 我在思考如何将这段德文文本 \"Heutzzutege schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen.\"翻译成中文。文本中用户引用了一段话,指出如今人们可以在电脑屏幕上撰写消息,而无需接近任何针状物。原文指出,现在人们可以通过电脑屏幕发送消息,而不需要亲自接触到针状物(可能是指针式打字机或其他类似设备)。这段文本可能是在比较现代的数字通信方式与传统的打字或书写方式的差异。\n\"如今,人们可以在电脑屏幕上编写消息,而无需接近任何针状物。" + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.415, + 0.884, + 0.457 + ], + "angle": 0, + "content": "Figure 6: Examples illustrating language-of-thought phenomenon, i.e., transition of the internal reasoning language in MT-R1-Zero models. The reasoning language transits from English at Step 0 to target language at Step 1600, indicated by bold text across various OOD test pairs (Finding 3)." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.473, + 0.49, + 0.779 + ], + "angle": 0, + "content": "As R1-Zero-like training typically lacks a cold-start (DeepSeek-AI et al., 2025; Huang et al., 2025) phase with predefined reasoning examples, the observed thinking processes should be emergent and shaped by the RL objective. Our framework incentivizes a variety of reasoning styles within the tags (Figure 12). In particular, we observe patterns ranging from structured multi-step decomposition (Types I-III) to more colloquial processing (Types IV-V). While some instances include explicit \"review/refine\" steps, these generally appear as pre-planned components rather than the conversational, iterative self-correction characteristic of the \"Aha moment\" reported in mathematical reasoning tasks (DeepSeek-AI et al., 2025; Xie et al., 2025; Hu et al., 2025). This suggests that while MT-R1-Zero successfully encourages thinking, the complexity and specific nature of emergent reasoning are task-dependent." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.793, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Furthermore, we observe a striking and interesting \"language-of-thought\" (transition in the language used for internal reasoning) phenomenon during OOD testing (Figure 6). While base models often use English as default thinking language based on template, MT-R1-Zero models progressively transit to utilize the target language of the translation task for their reasoning process within" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.473, + 0.883, + 0.553 + ], + "angle": 0, + "content": "the \\(<\\) think \\(>\\) \\(\\angle\\) /think> block during training (see bold Japanese or Chinese text in step 1600). This dynamic adaptation of the internal \"language of thought\", conditioned on the task, emerges even without direct supervision on reasoning language." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.611, + 0.855, + 0.627 + ], + "angle": 0, + "content": "5.3 Training Dynamics of Different LLMs" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.658, + 0.885, + 0.819 + ], + "angle": 0, + "content": "The effectiveness and training behavior of MT-R1-Zero are significantly influenced by the base LLM architecture and its initial state (pre-trained vs. instruction-tuned). We compare models from three distinct families: general-purpose (Qwen2.5 series\\(^{7}\\), LLaMA-3.1 series\\(^{8}\\)) and translation-specific (Tower family\\(^{9}\\)). For each model family, we include both the pre-trained base model and the corresponding instruction-finetuned variant, adapting their chat templates for the Instruct models." 
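To make this comparison concrete, a minimal sketch of how a single R1-Zero-style translation prompt might be prepared for a pre-trained base model versus an instruction-tuned variant is given below. The instruction wording, the <think>/<answer> tag names, and the helper name build_prompt are illustrative assumptions, not the authors' released code.

```python
# Minimal sketch (assumptions noted above): base models get a plain completion-style
# prompt, while Instruct variants are wrapped with their own chat template.
from transformers import AutoTokenizer

def build_prompt(tokenizer, src_text, src_lang, tgt_lang, is_instruct):
    instruction = (
        f"Translate the following text from {src_lang} into {tgt_lang}. "
        f"First reason inside <think></think>, then give the final translation "
        f"inside <answer></answer>.\n{src_lang}: {src_text}"
    )
    if is_instruct:
        # Instruction-tuned checkpoints: reuse the model's own chat template.
        messages = [{"role": "user", "content": instruction}]
        return tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
    # Pre-trained base checkpoints: plain user/assistant completion prompt.
    return f"User: {instruction}\nAssistant: <think>"

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
print(build_prompt(tok, "好看,但有点点小遗憾", "Chinese", "English", is_instruct=True))
```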
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.881, + 0.717, + 0.895 + ], + "angle": 0, + "content": "7https://huggingface.co/Qwen" + }, + { + "type": "page_footnote", + "bbox": [ + 0.532, + 0.895, + 0.748, + 0.907 + ], + "angle": 0, + "content": "8https://huggingface.co/meta-llama" + }, + { + "type": "page_footnote", + "bbox": [ + 0.533, + 0.907, + 0.855, + 0.921 + ], + "angle": 0, + "content": "\\(^{9}\\)https://huggingface.co/Unbabel/TowerBase-7B-v0.1" + }, + { + "type": "list", + "bbox": [ + 0.531, + 0.881, + 0.855, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.942, + 0.506, + 0.954 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.128, + 0.083, + 0.37, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.083, + 0.619, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.083, + 0.871, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.239, + 0.882, + 0.268 + ], + "angle": 0, + "content": "Figure 7: Comparison of training dynamics for different model families (Qwen2.5, LLaMA-3.1, Tower) undergoing MT-R1-Zero RL training, highlighting differences in adaptability (Finding 4)." + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.278, + 0.46, + 0.299 + ], + "angle": 0, + "content": "Reference (EN): It's encouraging to see, although there is some disappointment" + }, + { + "type": "title", + "bbox": [ + 0.234, + 0.304, + 0.359, + 0.315 + ], + "angle": 0, + "content": "Qwen2.5-7B-Base" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.319, + 0.282, + 0.33 + ], + "angle": 0, + "content": "User:好看,但有点点小遗憾" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.33, + 0.458, + 0.397 + ], + "angle": 0, + "content": "Assistant: I translated the Chinese text \"User:好看,但有点点小遗憾\" to English as \"It looks good, but there is a tiny bit of disappointment.\" The user is saying that the object or situation being discussed appears nice, but there is a small aspect that they are not completely satisfied with." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.397, + 0.446, + 0.417 + ], + "angle": 0, + "content": " \"It looks good, but there is a tiny bit of disappointment." + }, + { + "type": "title", + "bbox": [ + 0.225, + 0.431, + 0.367, + 0.442 + ], + "angle": 0, + "content": "LLaMA-3.1-8B-Base" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.448, + 0.282, + 0.458 + ], + "angle": 0, + "content": "User:好看,但有点点小遗憾" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.459, + 0.466, + 0.487 + ], + "angle": 0, + "content": "Assistant: reasoning to generate translation \"It's beautiful, but there are a few minor drawbacks.\" " + }, + { + "type": "title", + "bbox": [ + 0.589, + 0.282, + 0.748, + 0.293 + ], + "angle": 0, + "content": "Qwen2.5-7B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.477, + 0.3, + 0.819, + 0.34 + ], + "angle": 0, + "content": "user 好看,但有点点小遗憾 \nassistant The user is asking to translate the Chinese phrase \"好看,但有点点小遗憾\" to English. 
Let's break down the phrase:" + }, + { + "type": "text", + "bbox": [ + 0.477, + 0.348, + 0.828, + 0.368 + ], + "angle": 0, + "content": "- \"好看\" means \"it looks good\" or \"it is attractive.\"\n- \"但\" is a conjunction that means \"but.\"" + }, + { + "type": "text", + "bbox": [ + 0.477, + 0.368, + 0.86, + 0.387 + ], + "angle": 0, + "content": "- \"有点点小遗憾\" means \"there is a slight bit of regret\" or \"there is a small sense of disappointment.\"" + }, + { + "type": "list", + "bbox": [ + 0.477, + 0.348, + 0.86, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.475, + 0.396, + 0.865, + 0.436 + ], + "angle": 0, + "content": "Putting it all together, the phrase suggests that something looks good, but there is a small element of disappointment or regret. A possible translation could be: \"It looks good, but there's a tiny bit of disappointment.\"" + }, + { + "type": "text", + "bbox": [ + 0.475, + 0.444, + 0.851, + 0.484 + ], + "angle": 0, + "content": "To ensure the translation captures the nuance, we can refine it to: \"It looks good, but there's a slight sense of regret.\" \"It looks good, but there's a slight sense of regret.\" " + }, + { + "type": "title", + "bbox": [ + 0.208, + 0.498, + 0.384, + 0.509 + ], + "angle": 0, + "content": "LLaMA-3.1-8B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.515, + 0.402, + 0.535 + ], + "angle": 0, + "content": "user
\n好看,但有点点小遗憾" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.535, + 0.456, + 0.554 + ], + "angle": 0, + "content": " assistant " + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.554, + 0.449, + 0.593 + ], + "angle": 0, + "content": " \"It's nice, but there's a little bit of a disappointment.\" \"It's nice, but there's a little bit of a disappointment.\" " + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.498, + 0.597, + 0.509 + ], + "angle": 0, + "content": "TowerBase-7B" + }, + { + "type": "text", + "bbox": [ + 0.48, + 0.517, + 0.607, + 0.537 + ], + "angle": 0, + "content": "User:好看,但有点点小遗憾" + }, + { + "type": "text", + "bbox": [ + 0.48, + 0.537, + 0.606, + 0.556 + ], + "angle": 0, + "content": "Assistant: Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.48, + 0.556, + 0.609, + 0.575 + ], + "angle": 0, + "content": " \nTranslation" + }, + { + "type": "text", + "bbox": [ + 0.48, + 0.575, + 0.562, + 0.585 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.679, + 0.498, + 0.813, + 0.509 + ], + "angle": 0, + "content": "TowerInstruct-7B" + }, + { + "type": "text", + "bbox": [ + 0.633, + 0.517, + 0.862, + 0.537 + ], + "angle": 0, + "content": "user好看,但有点点小遗憾" + }, + { + "type": "text", + "bbox": [ + 0.633, + 0.537, + 0.846, + 0.556 + ], + "angle": 0, + "content": " assistant 好看,但有点点小遗憾" + }, + { + "type": "text", + "bbox": [ + 0.633, + 0.556, + 0.848, + 0.585 + ], + "angle": 0, + "content": " It was beautiful, but there was a small disappointment:" + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.611, + 0.885, + 0.655 + ], + "angle": 0, + "content": "Figure 8: Qualitative comparison of final outputs from different starting models trained with MT-R1-Zero for the Chinese input \"好看,但有点点小遗憾\", illustrating varying degrees of format adherence and reasoning generation, including format hacking by some models (Finding 4)." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.677, + 0.465, + 0.79 + ], + "angle": 0, + "content": "Finding 4: LLM architectures exhibit distinct adaptability and effectiveness under MT-R1-Zero, with Qwen showing the highest compatibility in format learning and reasoning generation, while LLaMA and Tower face more challenges and tend towards \"format hacking\"." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.49, + 0.922 + ], + "angle": 0, + "content": "As shown in Figure 7, both the translation-specific (Tower) and LLaMA-3.1 models exhibit significantly slower adaptation to the required / format compared to Qwen models, as evidenced by their delayed format error reduction. Furthermore, qualitative analysis (Figure 8) reveals that these models often" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.67, + 0.885, + 0.911 + ], + "angle": 0, + "content": "circumvent meaningful reasoning by generating minimal or templated placeholder content in the tags, potentially \"hacking\" the format reward. In contrast, Qwen2.5 models demonstrate stronger adaptability, consistently producing coherent reasoning text within the structured framework. This suggests that architectures like Qwen may possess inherent advantages for integrating structured reasoning via RL, a finding that aligns with prior work on cognitive behaviors in related domains (Gandhi et al., 2025). However, even Qwen2.5 models occasionally regress to simplistic one-sentence outputs during reasoning tasks, underscoring the instability of exploration in R1-Zero-like training paradigms." 
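The format-adherence behaviour discussed here can be illustrated with a small rule-based check of the kind such a reward typically relies on; the <think>/<answer> tag names and the binary scoring are assumptions for this sketch rather than the paper's exact rules. It also shows why a purely structural check is satisfiable by the empty or templated reasoning described above as format hacking.

```python
# Minimal sketch of a rule-based format check (tag names are an assumption).
import re

FORMAT_RE = re.compile(
    r"^\s*<think>(?P<think>.*?)</think>\s*<answer>(?P<answer>.*?)</answer>\s*$",
    re.DOTALL,
)

def format_reward(completion: str) -> float:
    """Return 1.0 if the completion follows the <think>...</think><answer>...</answer>
    layout, else 0.0. A structural check like this is still satisfied by empty or
    templated <think> content, which is the 'format hacking' behaviour noted above."""
    return 1.0 if FORMAT_RE.match(completion) else 0.0

print(format_reward("<think></think><answer>It looks good.</answer>"))  # 1.0, but hackable
print(format_reward("It looks good."))                                  # 0.0, malformed
```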
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.118, + 0.082, + 0.881, + 0.162 + ], + "angle": 0, + "content": "
Model | In-domain: ZH-EN | In-domain: EN-ZH | Out-of-distribution: EN-JA | Out-of-distribution: DE-ZH | Out-of-distribution: DE-EN (Doc)
 | COMETKiwi / XCOMET | COMETKiwi / XCOMET | COMETKiwi / XCOMET | COMETKiwi / XCOMET | COMETKiwi / XCOMET
Qwen2.5-7B (SFT) | 69.29 / 84.80 | 67.25 / 74.29 | 67.77 / 65.39 | 67.01 / 86.17 | 67.44 / 86.74
Qwen2.5-7B (RL w/o thinking) | 70.78 / 86.26 | 69.62 / 76.03 | 68.68 / 68.77 | 67.84 / 86.67 | 68.31 / 88.30
Qwen2.5-7B (RL w/ thinking) | 70.81 / 86.17 | 69.43 / 76.36 | 69.27 / 68.49 | 68.74 / 88.69 | 68.74 / 88.69
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.172, + 0.885, + 0.219 + ], + "angle": 0, + "content": "Table 3: Performance comparison of different training paradigms: Supervised Fine-Tuning (SFT) vs. RL with explicit thinking (RL w/ thinking) vs. RL without explicit thinking (RL w/o thinking). Results shown for in-domain and out-of-distribution tasks support the finding that the RL process itself is the primary driver of gains (Section 6)." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.237, + 0.486, + 0.334 + ], + "angle": 0, + "content": "
DRT test set
Model | BLEU | COMETKiwi-22 | XCOMET | Avg.
Qwen2.5-7B-Instruct | 24.17 | 69.66 | 61.84 | 51.89
TowerInstruct-13B | 22.71 | 70.55 | 62.77 | 52.01
DRT-7B | 35.51 | 71.77 | 68.40 | 58.56
DRT-14B | 36.37 | 72.15 | 69.64 | 59.39
Qwen2.5-7B (SFT) | 21.61 | 69.91 | 63.20 | 51.57
Qwen2.5-7B (RL w/o thinking) | 28.44 | 72.92 | 66.17 | 55.84
Qwen2.5-7B (RL w/ thinking) | 28.42 | 73.20 | 66.64 | 56.09
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.343, + 0.49, + 0.388 + ], + "angle": 0, + "content": "Table 4: Performance comparison on the DRT literature translation dataset (Wang et al., 2024a) using BLEU, COMETKiwi-22, and XCOMET metrics." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.411, + 0.341, + 0.427 + ], + "angle": 0, + "content": "6 Analysis and Ablation" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.436, + 0.482, + 0.468 + ], + "angle": 0, + "content": "6.1 KL Penalty Constrains Response Length but Not Quality Gains" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.472, + 0.49, + 0.812 + ], + "angle": 0, + "content": "We investigate the effectiveness of the KL term in the GRPO objective (Equation 1) on response length and translation quality, as it would regularize the policy by discouraging large deviations from the initial reference model. We conducted experiments without the KL penalty (setting \\(\\beta = 0\\), Figure 9), and found that the average response length, after an initial drop, began to fluctuate and trend upward during training. This pattern is consistent with R1-Zero-like results in mathematical tasks (Yu et al., 2025; Yeo et al., 2025). Additional ablation of the KL penalty with COMETKiwi reveals that the improvement of translation quality appears to be largely independent of the thinking vocabulary. Significant quality gains were achieved in early-stage training (e.g., before Steps 400) before a substantial increase in response length, even in experiments conducted without the KL penalty. This suggests that performance improvements in the MT-R1-Zero setup could not be attributed solely or primarily to increasing reasoning vocabulary." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.821, + 0.479, + 0.838 + ], + "angle": 0, + "content": "6.2 Disentangling RL and Explicit Thinking" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.491, + 0.922 + ], + "angle": 0, + "content": "To determine whether performance gains stem primarily from the explicit step or the underlying RL optimization, we conducted an ablation study comparing three training paradigms using the similar setup from Section 4.1: 1) Supervised Fine" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.238, + 0.871, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.392, + 0.866, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.555, + 0.886, + 0.626 + ], + "angle": 0, + "content": "Figure 9: Effect of the KL divergence penalty on EN-ZH COMETKiwi score and response length progression for models trained with (w/ KL, \\(\\beta = 0.01\\)) and without (w/o KL, \\(\\beta = 0\\)) the penalty. Experiments are conducted three times with MT-R1-Zero-7B-Sem." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.646, + 0.885, + 0.838 + ], + "angle": 0, + "content": "Tuning (SFT): The same base model is fine-tuned on the parallel data using LLaMA-Factory (Zheng et al., 2024), establishing a non-RL baseline. 2) RL w/ thinking (MT-R1-Zero-Sem): The model is trained with the rule-metric mixed reward (Format Reward and Reward-Sem) while enforcing explicit / structure generation. 3) RL w/o thinking: The model is trained with RL-zero optimization (Reward-Sem) solely to the final output, with no constraints on explicit step generation. See Appendix B for more details." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.886, + 0.922 + ], + "angle": 0, + "content": "The results are presented in Table 3. It reveals that the \"RL w/o thinking\" variant achieves performance comparable to MT-R1-Zero (\"RL w/ thinking\") across both in-domain and OOD tasks, while both RL configurations substantially outperform" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.125, + 0.083, + 0.312, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.083, + 0.498, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.083, + 0.684, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.083, + 0.872, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.124, + 0.215, + 0.312, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.215, + 0.498, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.215, + 0.684, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.686, + 0.215, + 0.872, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.357, + 0.882, + 0.386 + ], + "angle": 0, + "content": "Figure 10: Training progression (COMET-22) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets, demonstrating applicability in multilingual settings (Section 6.3)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.401, + 0.49, + 0.641 + ], + "angle": 0, + "content": "the SFT baseline – particularly in OOD settings. This pattern is further corroborated by evaluations on the DRT test set (Table 4), a literature translation benchmark (Wang et al., 2024a), where we again observe marginal differences between RL variants but significant gains over SFT. These findings demonstrate that while the tag could facilitate emergent reasoning patterns, the major performance improvements in MT-R1-Zero are primarily from the RL framework itself. This aligns with the intuition that online RL methods, iteratively sampling and evaluating self-generated outputs against quality metrics, principally learn \"how to translate\" that surpass SFT's behavior cloning limitations." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.653, + 0.48, + 0.669 + ], + "angle": 0, + "content": "6.3 Multilingual and Low-Resource Support" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.674, + 0.49, + 0.9 + ], + "angle": 0, + "content": "To evaluate the broader applicability of our framework, we examine its effectiveness in multilingual training scenarios and its potential benefits for low-resource languages. We train multilingual MT-R1-Zero models using the Germanic language data split in the X-ALMA (Xu et al., 2024), augmented with Chinese (see Table 9 for detailed data statistics). We set the batch size to 16 and used COMET\\(22^{10}\\) as the metric reward (Reward-Sem), consistent with the evaluation protocols in X-ALMA. All models are trained for 1 epoch on 16 NVIDIA H800 80G GPUs for about 12 hours. All other hyperparameters follow the configuration described in Section 4.1. 
The training progress, measured by" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.401, + 0.885, + 0.449 + ], + "angle": 0, + "content": "COMET-22 for English-to-target directions, is depicted in Figure 10. We also report the XCOMET progression in Figure 11." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.45, + 0.885, + 0.578 + ], + "angle": 0, + "content": "The learning curves demonstrate consistent improvement in translation quality across languages spanning diverse resource levels, including those typically considered low-resource (e.g., Icelandic (IS) and Norwegian (NO)). The steady performance improvement observed throughout training confirms that the MT-R1-Zero framework remains effective when applied in multilingual settings." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.59, + 0.642, + 0.606 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.616, + 0.885, + 0.922 + ], + "angle": 0, + "content": "In this work, we introduced MT-R1-Zero, the first successful adaptation of R1-Zero RL framework to MT using a novel rule-metric mixed reward mechanism that combines format enforcement with quality metrics. Our MT-R1-Zero significantly improves translation quality, achieving leading results on multiple benchmarks, i.e., our 3B models compete with much larger open-source models, while our 7B models are on par with advanced proprietary models. The MT-R1-Zero also demonstrates strong OOD generalization and multilingual applicability. Through extensive experiments and analysis, we highlight the significant impact of reward metric choice for optimization, showcase distinct adaptability across different LLMs, and reveal that performance gains are principally from the RL process itself rather than reasoning steps or morbidity, establishing R1-Zero as a viable and potent paradigm for advancing MT. More broadly, our work high-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.132, + 0.906, + 0.444, + 0.921 + ], + "angle": 0, + "content": "10https://huggingface.co/Unbabel/wmt22-comet-da" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.488, + 0.117 + ], + "angle": 0, + "content": "lights the great potential of RL for diverse language processing tasks beyond translation." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.129, + 0.221, + 0.143 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.154, + 0.492, + 0.492 + ], + "angle": 0, + "content": "While MT-R1-Zero represents a significant advance, certain limitations remain. The emergent reasoning observed, though diverse, did not achieve the sophisticated iterative self-correction capabilities demonstrated in mathematical reasoning tasks using similar RL or R1-like methods. This discrepancy may reflect fundamental differences in task structure or indicate the need for specialized design in translation tasks. One promising direction would be developing task-specific cold-start datasets for SFT before RL optimization, though this would deviate from the pure RL paradigm we investigated here. Future work could focus on inducing deeper reasoning structures specifically beneficial for the MT task, investigating architectural adaptability across a broader range of LLMs, and developing more appropriate reward mechanisms. 
Exploring applications to specialized domains (e.g., law and healthcare) and general language processing tasks presents promising opportunities to extend this work." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.519, + 0.214, + 0.533 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.541, + 0.49, + 0.62 + ], + "angle": 0, + "content": "Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. 2024. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.63, + 0.364, + 0.644 + ], + "angle": 0, + "content": "Anthropic. 2024. Claude 3.5 sonnet." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.654, + 0.49, + 0.72 + ], + "angle": 0, + "content": "Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. 2025. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.729, + 0.49, + 0.808 + ], + "angle": 0, + "content": "Marta R Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, et al. 2022. No language left behind: Scaling human-centered machine translation. arXiv preprint arXiv:2207.04672." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.818, + 0.49, + 0.883 + ], + "angle": 0, + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. 2025. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.894, + 0.49, + 0.921 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu," + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.541, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.527, + 0.086, + 0.886, + 0.752 + ], + "angle": 0, + "content": "Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun T. 
Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang W. L. Xiao Wei An Xiaodong Liu Xiaohan Wang Xiaokang Chen Xiaotao Nie, Xin Cheng Xien Liu Xie Xingchao Liu Xinyu Yang Xinyuan Li Xuecheng Su Xuheng Lin X.Q.Li Xiangyue Jin Xiaojin Shen Xiaosha Chen Xiaowen Sun Xiaoxiang Wang Xinnan Song Xinyi Zhou Xianzu Wang Xinxia Shan Y.K. Li Y.Q.WangY.X.Wei Yang Zhang Yanhong Xu Yao Li Yao Zhao Yaofeng Sun Yaohui Wang Yi Yu Yichao Zhang Yifan Shi Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunfan Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y.X.Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan Zhang Zhewen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint arXiv:2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.763, + 0.885, + 0.791 + ], + "angle": 0, + "content": "Hugging Face. 2025. Open r1: A fully open reproduction of deepseek-r1." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.802, + 0.885, + 0.868 + ], + "angle": 0, + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.88, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Zhaopeng Feng, Ruizhe Chen, Yan Zhang, Zijie Meng, and Zuozhu Liu. 2024a. Ladder: A model-agnostic framework boosting LLM-based machine translation" + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.086, + 0.886, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.511, + 0.955 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.49, + 0.14 + ], + "angle": 0, + "content": "to the next level. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15377-15393, Miami, Florida, USA. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.151, + 0.489, + 0.218 + ], + "angle": 0, + "content": "Zhaopeng Feng, Jiahan Ren, Jiayuan Su, Jiamei Zheng, Zhihang Tang, Hongwei Wang, and Zuozhu Liu. 2025. Mt-rewardtree: A comprehensive framework for advancing llm-based machine translation via reward modeling. arXiv preprint arXiv:2503.12123." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.229, + 0.489, + 0.295 + ], + "angle": 0, + "content": "Zhaopeng Feng, Yan Zhang, Hao Li, Wenqiang Liu, Jun Lang, Yang Feng, Jian Wu, and Zuozhu Liu. 2024b. Improving llm-based machine translation with systematic self-correction. arXiv preprint arXiv:2402.16379." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.307, + 0.489, + 0.386 + ], + "angle": 0, + "content": "Markus Freitag, George Foster, David Grangier, Viresh Ratnakar, Qijun Tan, and Wolfgang Macherey. 2021. Experts, errors, and context: A large-scale study of human evaluation for machine translation. Transactions of the Association for Computational Linguistics, 9:1460-1474." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.399, + 0.489, + 0.517 + ], + "angle": 0, + "content": "Markus Freitag, Nitika Mathur, Chi-kiu Lo, Eleftherios Avramidis, Ricardo Rei, Brian Thompson, Tom Kocmi, Frederic Blain, Daniel Deutsch, Craig Stewart, Chrysoula Zerva, Sheila Castilho, Alon Lavie, and George Foster. 2023. Results of WMT23 metrics shared task: Metrics might be guilty but references are not innocent. In Proceedings of the Eighth Conference on Machine Translation, pages 578-628, Singapore. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.529, + 0.489, + 0.648 + ], + "angle": 0, + "content": "Markus Freitag, Ricardo Rei, Nitika Mathur, Chi-kiu Lo, Craig Stewart, Eleftherios Avramidis, Tom Kocmi, George Foster, Alon Lavie, and André F. T. Martins. 2022. Results of WMT22 metrics shared task: Stop using BLEU – neural metrics are better and more robust. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 46–68, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.659, + 0.489, + 0.724 + ], + "angle": 0, + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. 2025. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.737, + 0.489, + 0.804 + ], + "angle": 0, + "content": "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.816, + 0.489, + 0.881 + ], + "angle": 0, + "content": "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.894, + 0.489, + 0.92 + ], + "angle": 0, + "content": "Nuno M Guerreiro, Ricardo Rei, Daan van Stigt, Luisa Coheur, Pierre Colombo, and Andre FT Martins." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.53, + 0.086, + 0.884, + 0.139 + ], + "angle": 0, + "content": "2024. xcomet: Transparent machine translation evaluation through fine-grained error detection. Transactions of the Association for Computational Linguistics, 12:979-995." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.147, + 0.883, + 0.226 + ], + "angle": 0, + "content": "Minggui He, Yilun Liu, Shimin Tao, Yuanchang Luo, Hongyong Zeng, Chang Su, Li Zhang, Hongxia Ma, Daimeng Wei, Weibin Meng, et al. 2025. R1-t1: Fully incentivizing translation capability in llms via reasoning learning. arXiv preprint arXiv:2502.19735." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.235, + 0.883, + 0.314 + ], + "angle": 0, + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. 2025. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.323, + 0.883, + 0.388 + ], + "angle": 0, + "content": "Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. 2025. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.398, + 0.883, + 0.476 + ], + "angle": 0, + "content": "Tom Kocmi, Eleftherios Avramidis, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Markus Freitag, Thamme Gowda, Roman Grundkiewicz, et al. 2024. Preliminary wmt24 ranking of general mt systems and llms. arXiv preprint arXiv:2407.19884." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.485, + 0.883, + 0.552 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2024. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.56, + 0.883, + 0.613 + ], + "angle": 0, + "content": "Sinuo Liu, Chenyang Lyu, Minghao Wu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2025. New trends for modern machine translation with large reasoning models. arXiv preprint arXiv:2503.10351." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.621, + 0.883, + 0.687 + ], + "angle": 0, + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. 2024. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.696, + 0.777, + 0.71 + ], + "angle": 0, + "content": "OpenAI. 2023. GPT-4: technical work." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.719, + 0.883, + 0.744 + ], + "angle": 0, + "content": "OpenAI. 2024. Introducing openai o1. https://openai.com/o1/. Accessed: 2024-10-02." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.754, + 0.883, + 0.846 + ], + "angle": 0, + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.855, + 0.883, + 0.92 + ], + "angle": 0, + "content": "Maja Popovic. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392–395, Lisbon, Portugal. Association for Computational Linguistics." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.884, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.51, + 0.954 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.153 + ], + "angle": 0, + "content": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Brussels, Belgium. Association for Computational Linguistics." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.165, + 0.486, + 0.218 + ], + "angle": 0, + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 2024. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.229, + 0.486, + 0.295 + ], + "angle": 0, + "content": "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. Comet: A neural framework for mt evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2685-2702." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.307, + 0.486, + 0.4 + ], + "angle": 0, + "content": "Ricardo Rei, Marcos Treviso, Nuno M Guerreiro, Chrysoula Zerva, Ana C Farinha, Christine Maroti, José GC De Souza, Taisiya Glushkova, Duarte Alves, Luísca Coheur, et al. 2022. Cometkiwi: Ist-unbabel 2022 submission for the quality estimation shared task. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 634-645." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.412, + 0.486, + 0.464 + ], + "angle": 0, + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.477, + 0.486, + 0.543 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.555, + 0.486, + 0.685 + ], + "angle": 0, + "content": "David Silver, Aja Huang, Chris J. Maddison, Arthur Guez, L. Sifre, George van den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Vedavyas Panneershelvam, Marc Lanctot, Sander Dieleman, Dominik Grewe, John Nham, Nal Kalchbrenner, Ilya Sutskever, Timothy P. Lillicrap, Madeleine Leach, Koray Kavukcuoglu, Thore Graepel, and Demis Hassabis. 2016. Mastering the game of go with deep neural networks and tree search. Nature, 529:484-489." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.698, + 0.486, + 0.751 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters arXiv preprint arXiv:2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.764, + 0.486, + 0.843 + ], + "angle": 0, + "content": "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.854, + 0.486, + 0.882 + ], + "angle": 0, + "content": "Kimi Team. 2025a. Kimi k1.5: Scaling reinforcement learning with llms." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.894, + 0.486, + 0.921 + ], + "angle": 0, + "content": "Qwen Team. 2025b. Qwq-32b: Embracing the power of reinforcement learning." 
+ }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.881, + 0.138 + ], + "angle": 0, + "content": "Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. 2024a. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.153, + 0.883, + 0.232 + ], + "angle": 0, + "content": "Yutong Wang, Jiali Zeng, Xuebo Liu, Fandong Meng, Jie Zhou, and Min Zhang. 2024b. Taste: Teaching large language models to translate through self-reflection. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6144-6158." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.245, + 0.883, + 0.311 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.324, + 0.883, + 0.403 + ], + "angle": 0, + "content": "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Balak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. 2025. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.416, + 0.883, + 0.483 + ], + "angle": 0, + "content": "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.496, + 0.883, + 0.562 + ], + "angle": 0, + "content": "Haoran Xu, Young Jin Kim, Amr Sharaf, and Hany Hassan Awadalla. 2023. A paradigm shift in machine translation: Boosting translation performance of large language models. arXiv preprint arXiv:2309.11674." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.576, + 0.883, + 0.641 + ], + "angle": 0, + "content": "Haoran Xu, Kenton Murray, Philipp Koehn, Hieu Hoang, Akiko Eriguchi, and Huda Khayrallah. 2024. X-alma: Plug & play modules and adaptive rejection for quality translation at scale. arXiv preprint arXiv:2410.03115." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.655, + 0.883, + 0.709 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.722, + 0.883, + 0.774 + ], + "angle": 0, + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.788, + 0.883, + 0.854 + ], + "angle": 0, + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An opensource llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.868, + 0.883, + 0.921 + ], + "angle": 0, + "content": "Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. 2024. Free process rewards without process labels. arXiv preprint arXiv:2412.01981." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.883, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.086, + 0.49, + 0.165 + ], + "angle": 0, + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.174, + 0.49, + 0.241 + ], + "angle": 0, + "content": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2024. Marco-o1: Towards open reasoning models for open-ended solutions. Preprint, arXiv:2411.14405." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.25, + 0.49, + 0.316 + ], + "angle": 0, + "content": "Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. Llamafactory: Unified efficient finetuning of \\(100+\\) language models. arXiv preprint arXiv:2403.13372." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.329, + 0.313, + 0.344 + ], + "angle": 0, + "content": "A Evaluation Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.355, + 0.488, + 0.501 + ], + "angle": 0, + "content": "When evaluating model performance on the test set, we deployed open-source models locally using frameworks like vLLM11 or HuggingFace12 implementations. We use the sampling decoding strategy with a temperature of 0.2, and top_p set to 0.95. The maximum generation length was capped at 1024 tokens. We adipot the prompt showcasing in Table 5 to sample the translation (applying specific chat template when needed)." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.511, + 0.334, + 0.528 + ], + "angle": 0, + "content": "B SFT Training Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.537, + 0.49, + 0.731 + ], + "angle": 0, + "content": "For the Supervised Fine-Tuning (SFT) baseline compared in the ablation study (Section 6.2), we utilized LLaMA-Factory (Zheng et al., 2024). The SFT process started from the same base model architecture as the corresponding RL experiments (e.g., Qwen2.5-7B) and was performed on the identical parallel translation dataset (13,130 examples from WMT 2017-2020 after filtering, detailed in Section 4.1). The model was fine-tuned on 8 NVIDIA H800 80G GPUs for 2 epochs using a learning rate of 5e-6 and a batch size of 64, totaling approximately 400 training steps." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.133, + 0.878, + 0.222 + ], + "angle": 0, + "content": "
Inference Prompt
Translate the following text from {src_language} into {tgt_language}. {src_language}:{src_text} {tgt_language}:
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.232, + 0.884, + 0.275 + ], + "angle": 0, + "content": "Table 5: Prompt used for translation generation. {tgt_language} : target language; {src_language}: source language; {src_text}: the source test sentence." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.379, + 0.88, + 0.52 + ], + "angle": 0, + "content": "
Out-of-distribution
Model | EN-JA | DE-EN (Doc) | DE-ZH | Avg.
Strong Baseline
Qwen2.5-72B-Instruct | 73.25 | 69.13 | 69.89 | 70.76
LLaMA3.1-70B-Instruct | 71.84 | 69.28 | 68.67 | 69.93
Same-size Baseline
Qwen2.5-7B-Instruct | 64.79 | 67.20 | 67.82 | 66.60
LLaMA-3.1-8B-Instruct | 62.42 | 66.77 | 64.28 | 64.49
TowerInstruct-7B-v0.2 | 58.33 | 69.03 | 65.45 | 64.27
MT-R1-Zero-7B-Lex | 63.33 | 66.17 | 64.32 | 64.61
MT-R1-Zero-7B-Sem | 72.00 | 68.41 | 71.51 | 70.64
MT-R1-Zero-7B-Mix | 69.27 | 68.74 | 68.74 | 68.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.529, + 0.884, + 0.573 + ], + "angle": 0, + "content": "Table 6: Out-of-distribution performance comparison using the COMETKiwi metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2)." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.676, + 0.88, + 0.817 + ], + "angle": 0, + "content": "
Out-of-distribution
Model | EN-JA | DE-EN (Doc) | DE-ZH | Avg.
Strong Baseline
Qwen2.5-72B-Instruct | 25.02 | 45.54 | 40.83 | 37.13
LLaMA3.1-70B-Instruct | 24.64 | 45.98 | 37.85 | 36.16
Same-size Baseline
Qwen2.5-7B-Instruct | 18.91 | 41.17 | 35.25 | 31.78
LLaMA-3.1-8B-Instruct | 16.22 | 40.28 | 31.08 | 29.19
TowerInstruct-7B-v0.2 | 10.52 | 43.40 | 34.74 | 29.55
MT-R1-Zero-7B-Lex | 14.94 | 40.01 | 37.00 | 30.65
MT-R1-Zero-7B-Sem | 14.12 | 33.19 | 22.83 | 23.38
MT-R1-Zero-7B-Mix | 20.27 | 43.17 | 21.41 | 28.28
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.826, + 0.882, + 0.869 + ], + "angle": 0, + "content": "Table 7: Out-of-distribution performance comparison using the BLEU metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.881, + 0.398, + 0.922 + ], + "angle": 0, + "content": "\\(^{11}\\)https://github.com/vllm-project/vllm \n\\(^{12}\\)https://huggingface.co/docs/transformers/main_classeses/text_generation" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.51, + 0.955 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.114, + 0.885, + 0.187 + ], + "angle": 0, + "content": "
 | Train: EN-ZH | Train: ZH-EN | Test: EN-ZH | Test: ZH-EN | Test: EN-JA | Test: DE-EN | Test: DE-ZH
# of cases | 6565 | 6565 | 997 | 1976 | 997 | 549 | 1012
Source | WMT 17-20 | WMT 17-20 | WMT 24 | WMT 23 | WMT 24 | WMT 23 | Flores
" + }, + { + "type": "table_caption", + "bbox": [ + 0.183, + 0.195, + 0.816, + 0.21 + ], + "angle": 0, + "content": "Table 8: Data statistics for the training and test sets used in the main experiments (EN \\( \\rightleftharpoons \\) ZH)." + }, + { + "type": "table", + "bbox": [ + 0.129, + 0.28, + 0.872, + 0.482 + ], + "angle": 0, + "content": "
Parallel Data
Language | Train (from EN) | Train (to EN) | Test (from EN) | Test (to EN) | Resource
Afrikaans (AF) | 2994 | 341 | 1012 | 1012 | Mid
Danish (DA) | 2994 | 355 | 1012 | 1012 | Mid
Dutch (NL) | 2994 | 403 | 1012 | 1012 | High
German (DE) | 7015 | 885 | 1012 | 1012 | High
Icelandic (IS) | 4994 | 678 | 1012 | 1012 | Low
Norwegian (NO) | 2994 | 360 | 1012 | 1012 | Low
Swedish (SV) | 2994 | 339 | 1012 | 1012 | High
Chinese (ZH) | 6906 | 874 | 1012 | 1012 | High
English (EN) | - | - | - | - | -
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.492, + 0.884, + 0.522 + ], + "angle": 0, + "content": "Table 9: Parallel data statistics for languages used in multilingual experiments (Section 6.3), detailing training/test pairs and resource level classification." + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.593, + 0.312, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.593, + 0.497, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.593, + 0.683, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.689, + 0.593, + 0.871, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.725, + 0.312, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.725, + 0.497, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.725, + 0.683, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.725, + 0.871, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.867, + 0.884, + 0.896 + ], + "angle": 0, + "content": "Figure 11: Training progression (reference-free XCOMET score) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets (Section 6.3)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.942, + 0.511, + 0.955 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.134, + 0.116, + 0.345, + 0.134 + ], + "angle": 0, + "content": "User:\"It's not that bad, right, Kayel?\"" + }, + { + "type": "title", + "bbox": [ + 0.134, + 0.14, + 0.24, + 0.149 + ], + "angle": 0, + "content": "Assistant: " + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.15, + 0.342, + 0.273 + ], + "angle": 0, + "content": "When approaching the task of translating the given English text into Japanese, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original while ensuring it reads smoothly and naturally in Japanese. I will pay particular attention to preserving the conversational nature of the exchange and the use of question tags (\"right, Kayel?\"), as well as the balance between the two characters' statements." + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.28, + 0.325, + 0.296 + ], + "angle": 0, + "content": "In this case, I will consider the following aspects:" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.304, + 0.336, + 0.345 + ], + "angle": 0, + "content": "1. Maintaining the conversational tone: This involves using appropriate phrasing, word order, and intonation to convey the casual nature of the conversation." + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.346, + 0.342, + 0.395 + ], + "angle": 0, + "content": "2. Translating question tags: Japanese question tags are typically formed differently from English, so I will find an equivalent structure that maintains the flow of the conversation." + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.395, + 0.33, + 0.453 + ], + "angle": 0, + "content": "3. Preserving the balance between the two characters' statements: I will ensure that the translation reflects the equal weight given to both Kayel's response and the speaker's initial statement in the original text." 
+ }, + { + "type": "text", + "bbox": [ + 0.134, + 0.453, + 0.342, + 0.519 + ], + "angle": 0, + "content": "4. Adapting to Japanese sentence structure: Japanese often places the verb at the end of a sentence, so I will adjust the word order accordingly while maintaining the overall meaning and rhythm of the conversation. " + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.519, + 0.203, + 0.525 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.525, + 0.282, + 0.532 + ], + "angle": 0, + "content": "“大丈夫!也存在心。久己工儿?”" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.532, + 0.204, + 0.54 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.134, + 0.546, + 0.261, + 0.555 + ], + "angle": 0, + "content": "User:好看,但有点点小遗憾" + }, + { + "type": "title", + "bbox": [ + 0.134, + 0.555, + 0.239, + 0.562 + ], + "angle": 0, + "content": "Assistant: " + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.563, + 0.351, + 0.669 + ], + "angle": 0, + "content": "The task at hand involves translating a sentence of Chinese into English while taking into consideration several factors to ensure an effective and appropriate translation. The source text is: “好看,但有点小遗憾。” Given the context in which this sentence is being used, it is clear that the speaker is expressing their positive opinion of something they have just seen or experienced, but also acknowledging a minor flaw or disappointment that slightly" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.67, + 0.352, + 0.84 + ], + "angle": 0, + "content": "diminishes their overall satisfaction. With this in mind, it is important to maintain the same level of nuance and complexity in the translation to effectively convey the same level of emotional and intellectual engagement expected in professional and polished English communication. After careful consideration of these factors and the specific requirements of the translation context, I have decided to render the source text as follows: \"It looks good, but there's a tiny bit of disappointment.\" This translation not only accurately conveys the same level of appreciation and critique as the source text but also does so in a professional and tactful manner that respects the sensitivity and complexity of the situation at hand." + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.84, + 0.199, + 0.848 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.849, + 0.352, + 0.866 + ], + "angle": 0, + "content": "\"It looks good, but there's a tiny bit of disappointment.\" " + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.116, + 0.611, + 0.141 + ], + "angle": 0, + "content": "User:北交所融资融券交易的启动是持续推进北交所市场制度建设、完善交易机制、提升二级市场定价功能的重要举措。" + }, + { + "type": "title", + "bbox": [ + 0.361, + 0.149, + 0.466, + 0.157 + ], + "angle": 0, + "content": "Assistant: " + }, + { + "type": "text", + "bbox": [ + 0.36, + 0.157, + 0.607, + 0.256 + ], + "angle": 0, + "content": "When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese. I will focus on preserving the balance between providing all relevant information and maintaining readability, while also considering the natural flow of the English language. Additionally, I will pay attention to the following aspects:" + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.263, + 0.6, + 0.312 + ], + "angle": 0, + "content": "1. 
Structure: I will aim to maintain the overall structure of the Chinese text, including the use of paragraphs and sentence order, to ensure that the English translation remains coherent and easy to follow." + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.313, + 0.608, + 0.361 + ], + "angle": 0, + "content": "2. Tone and voice: I will strive to capture the tone and voice of the original text, which appears to be informative and objective, while also considering the appropriate level of formality for the target audience." + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.362, + 0.608, + 0.411 + ], + "angle": 0, + "content": "3. Terminology: I will use accurate and up-to-date terminology related to the stock exchange, financing, and trading mechanisms, ensuring that all terms are clear and easily understood by the English-speaking audience." + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.411, + 0.608, + 0.467 + ], + "angle": 0, + "content": "4. Cultural and linguistic nuances: I will pay attention to any cultural or linguistic nuances present in the Chinese text and find suitable ways to convey them in English, where possible, without compromising the clarity or readability of the translation." + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.468, + 0.608, + 0.518 + ], + "angle": 0, + "content": "5. Number and date formats: I will convert any numbers and date formats from the Chinese system to the corresponding English system, as necessary, to ensure that the translation is both accurate and locally appropriate." + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.518, + 0.608, + 0.566 + ], + "angle": 0, + "content": "6. Punctuation and formatting: I will use standard English punctuation and formatting conventions, while taking into account any specific requirements or preferences for the target audience or publication platform." + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.574, + 0.602, + 0.615 + ], + "angle": 0, + "content": "By considering these aspects, I aim to produce a high-quality English translation that effectively communicates the content and intent of the original Chinese text. " + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.616, + 0.426, + 0.623 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.623, + 0.608, + 0.681 + ], + "angle": 0, + "content": "\"The launch of financing and margin trading on the Beijing Stock Exchange is an important step in advancing the development of the exchange's market systems, improving trading mechanisms, and enhancing the secondary market's pricing capabilities.\" " + }, + { + "type": "title", + "bbox": [ + 0.361, + 0.687, + 0.583, + 0.696 + ], + "angle": 0, + "content": "User:加拿大也报告了类似的结果,只是数字较低。" + }, + { + "type": "title", + "bbox": [ + 0.361, + 0.703, + 0.467, + 0.711 + ], + "angle": 0, + "content": "Assistant: " + }, + { + "type": "text", + "bbox": [ + 0.36, + 0.712, + 0.603, + 0.833 + ], + "angle": 0, + "content": "When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese while ensuring it reads smoothly and naturally in English. I will focus on preserving the balance between the two sentences, maintaining the use of parallel structure where appropriate, and accurately conveying the nuances of the Chinese phrasing, including the use of numbers and the comparative nature of the information presented." 
+ }, + { + "type": "text", + "bbox": [ + 0.361, + 0.833, + 0.428, + 0.841 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.361, + 0.842, + 0.578, + 0.868 + ], + "angle": 0, + "content": "\"Canada also reported similar results, although the numbers were lower.\" " + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.116, + 0.863, + 0.141 + ], + "angle": 0, + "content": "User: to divide the 412.5 meters long, 12.9 meters wide, with a total weight of 15,000 tons of continuous beams into 96 sections." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.149, + 0.853, + 0.198 + ], + "angle": 0, + "content": "Assistant: To translate the given English sentence into Chinese, I would follow these steps to ensure accuracy, fidelity to the original text, and naturalness in the target language (Chinese):" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.206, + 0.863, + 0.272 + ], + "angle": 0, + "content": "1. Understand the context and meaning of the original text: I would first read and re-read the sentence to grasp its full meaning, paying close attention to technical terms, measurements, and the overall structure of the information being conveyed. This ensures a proper translation that maintains the original message." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.273, + 0.865, + 0.337 + ], + "angle": 0, + "content": "2. Identify key elements and vocabulary: I would identify important numbers, measurements, and specific terms that need to be accurately translated, such as \"412.5 meters,\" \"12.9 meters,\" and \"15,000 tons.\" I would also note any technical terms related to engineering or construction, such as \"continuous beams\" and \"sections.\"" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.337, + 0.865, + 0.403 + ], + "angle": 0, + "content": "3. Consider cultural and contextual nuances: While this particular sentence doesn't contain cultural references that would require adaptation, I would generally be aware of how to translate phrases or expressions that might have different connotations or usages in Chinese culture compared to English." + }, + { + "type": "title", + "bbox": [ + 0.617, + 0.403, + 0.825, + 0.411 + ], + "angle": 0, + "content": "4. Choose an appropriate translation" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.411, + 0.859, + 0.476 + ], + "angle": 0, + "content": "strategy: For this type of technical information, I would likely use a combination of literal translation and necessary modifications to fit Chinese syntax and readability. This might involve reordering words or phrases to sound more natural in Chinese while still maintaining the original meaning." + }, + { + "type": "title", + "bbox": [ + 0.617, + 0.477, + 0.813, + 0.485 + ], + "angle": 0, + "content": "5. Translate while considering the" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.485, + 0.863, + 0.541 + ], + "angle": 0, + "content": "audience: Since this translation is presumably for a Chinese-speaking audience, I would ensure that the translation aligns with how Chinese speakers might express similar ideas, using idiomatic expressions and terminology that would be familiar to them." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.542, + 0.864, + 0.599 + ], + "angle": 0, + "content": "6. Account for register and tone: Depending on the intended use of the translation (e.g., technical manual, news report, etc.), I would adjust the formality and wording to match the appropriate register. 
For a technical report, the language would be more formal and precise." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.6, + 0.864, + 0.664 + ], + "angle": 0, + "content": "7. Translate numbers and measurements: I would carefully translate the numbers and measurements given (412.5 meters, 12.9 meters, 15,000 tons) to their equivalent in Chinese, ensuring that the units of measurement are accurately reflected in the translation (e.g., using \"米\" for meters and \"吨\" for tons)." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.665, + 0.865, + 0.73 + ], + "angle": 0, + "content": "8. Ensure coherence and flow in the translation: After translating each part of the sentence, I would review and refine the translation to ensure that it flows naturally in Chinese, making any necessary adjustments to grammar, syntax, and phrasing to create a smooth and coherent translation." + }, + { + "type": "title", + "bbox": [ + 0.617, + 0.731, + 0.828, + 0.739 + ], + "angle": 0, + "content": "9. Review and make final adjustments:" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.74, + 0.861, + 0.804 + ], + "angle": 0, + "content": "Finally, I would review the entire translated sentence for accuracy, fluency, and naturalness in Chinese. I would make any necessary adjustments to improve the translation, checking again against the original text to ensure that all information is preserved and that there are no errors or misunderstandings." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.812, + 0.853, + 0.844 + ], + "angle": 0, + "content": "By following these steps, I can provide a translation that is accurate, faithful to the original text, and reads naturally in Chinese." + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.853, + 0.848, + 0.862 + ], + "angle": 0, + "content": "将总长412.5米、宽12.9米" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.862, + 0.833, + 0.869 + ], + "angle": 0, + "content": "总重15000吨的连续梁分成96节。" + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.884, + 0.881, + 0.898 + ], + "angle": 0, + "content": "Figure 12: Qualitative examples (I-V) showcasing the diverse thinking patterns generated by MT-R1-Zero models." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.943, + 0.509, + 0.955 + ], + "angle": 0, + "content": "18" + }, + { + "type": "title", + "bbox": [ + 0.834, + 0.187, + 0.858, + 0.202 + ], + "angle": 0, + "content": "Ⅲ" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_origin.pdf b/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3d33cdd254e0abb9a17c2ec500044b8a3a741b16 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/b4495eb2-d145-42fa-9b61-d633ab2f00d9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a68f447ec1df8cb51f3137c8ab83f98772106808cfa7f9e9ac0c51687e3976c9 +size 2255100 diff --git a/data/2025/2504_10xxx/2504.10160/full.md b/data/2025/2504_10xxx/2504.10160/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5a93ad0a44778f4c33b0a1077abd48b2b36f2b80 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/full.md @@ -0,0 +1,631 @@ +# MT-R1-Zero: Advancing LLM-based Machine Translation via R1-Zero-like Reinforcement Learning + +Zhaopeng Feng $^{1}$ Shaosheng Cao $^{2\dagger}$ Jiahan Ren $^{1}$ Jiayuan Su $^{1}$ + +Ruizhe Chen $^{1}$ Yan Zhang $^{1}$ Zhe Xu $^{2}$ Yao Hu $^{2}$ Jian Wu $^{1}$ Zuozhu Liu $^{1\dagger}$ + +$^{1}$ Zhejiang University $^{2}$ Xiaohongshu Inc. + +{zhaopeng.23,zuozhuliu}@intl.zju.edu.cn + +{caoshaosheng,qiete,xiahou}@xiaohongshu.com + +# Abstract + +Large-scale reinforcement learning (RL) methods have proven highly effective in enhancing the reasoning abilities of large language models (LLMs), particularly for tasks with verifiable solutions such as mathematics and coding. However, applying this idea to machine translation (MT), where outputs are flexibly formatted and difficult to automatically evaluate with explicit rules, remains underexplored. In this work, we introduce MT-R1-Zero, the first open-source adaptation of the R1-Zero RL framework for MT without supervised fine-tuning or cold-start. We propose a rule-metric mixed reward mechanism to guide LLMs towards improved translation quality via emergent reasoning. On the WMT 24 English-Chinese benchmark, our MT-R1-Zero3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points. Meanwhile, our MT-R1-Zero7B-Mix attains a high average score of 62.25 across all metrics, placing it on par with advanced proprietary models such as GPT-4o and Claude-3.5-Sonnet, while the MT-R1-Zero7B-Sem variant achieves state-of-the-art scores on semantic metrics. Moreover, our work exhibits strong generalization capabilities on out-of-distribution MT tasks, robustly supporting multilingual and low-resource settings. Extensive analysis of model behavior across different initializations and reward metrics offers pioneering insight into the critical role of reward design, LLM adaptability, training dynamics, and emergent reasoning patterns within the R1-Zero paradigm for MT. Our code is available at https://github.com/fzp0424/MT-R1-Zero. 
+ +# 1 Introduction + +Large-scale Reinforcement Learning (RL) has empowered Large Language Models (LLMs) with strong reasoning capabilities (OpenAI, 2024; Team, + +![](images/6f616ff4dcdccce81b29cf948d93e5785e99354f0a92b7b75e4b01182a86d233.jpg) +Figure 1: Performance comparison of contemporary LLM-based translation systems on the WMT 24 EN-ZH test set, plotted by average score across BLEU, COMETKiwi, and XCOMET versus model size. + +2025a,b), demonstrating significant success in tasks such as mathematical reasoning or coding in which answers can be clearly verified. In particular, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) introduced a pure rule-based RL approach that directly fosters emergent reasoning ability without requirements on structured Chain-of-Thought (CoT) data (Wei et al., 2022; Cui et al., 2025) or sophisticated techniques such as Monte Carlo Tree Search (MCTS) (Silver et al., 2016; Luo et al., 2024; Qi et al., 2024; Guan et al., 2025). However, the applicability of these methods to machine translation (MT) remains challenging and underexplored, as MT outputs are flexibly generated and hard to evaluate automatically with explicit rules. + +Recent work has launched attempts to empower LLMs for MT with reasoning capabilities (Chen et al., 2025; Liu et al., 2025). Early studies investigate explicit reasoning methods for improved translation, such as finetuning with CoT (Wang et al., 2024a) or MCTS (Zhao et al., 2024), where advanced multi-step pipelines with self-correction or long-thought agentic mechanisms are further ex + +plored (Feng et al., 2024b; Wang et al., 2024b,a). Another line of work leverages RL to empower LLMs for MT through process reward models or supervised finetuning (SFT) with manually annotated CoT data (Feng et al., 2025; He et al., 2025). However, these methods often depend on manually designed or synthetically generated structured CoT data, rely on complex search algorithms, or require explicit multi-stage prompting, leaving the potential of pure RL-based approaches largely unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art (SoTA) open-source or proprietary models. + +Developing pure RL methods to directly enhance the reasoning ability of LLMs for better translation requires answering three key questions: 1) Feasibility: How to design R1-Zero-like RL pipelines with effective reward signals to directly solve MT tasks without binary rule-based rewards; 2) Reasoning capability: Could pure RL training cultivate emergent reasoning abilities and induce models to generate explicit thinking patterns for MT, such as multi-step CoT or verification/reflection; 3) Generalizability: Could the training paradigm generalize across different models (e.g., pre-trained base models, instruction-tuned models, or models pretrained on translation data) or diverse downstream settings (e.g., out-of-distribution, multilingual or low-resource scenarios). + +In this work, we introduce MT-R1-Zero, the first open-source implementation that extends the RL-Zero-like RL training paradigm to MT. We propose a rule-metric mixed reward mechanism that adapts the original rule-based reward concept to effectively guide training in MT scenarios. We explore different rewards optimizing over lexical (Lex), semantic (Sem), and Lex-Sem mixed (Mix) objectives to guide LLMs towards improved translation quality via emergent reasoning. 
Our experiments demonstrate the efficacy of this approach: as RL training progresses, our MT-R1-Zero-3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points across all metrics (BLEU, COMETKiwi, XCOMET) on the WMT 24 English-Chinese (EN-ZH) benchmark. Meanwhile, our MT-R1-Zero-7B-Mix surpasses LLaMA-3.1-70B by an average of 1.24 points and Qwen2.5-72B by 0.48 points, even on par with top proprietary models such as GPT-4o and Claude-3.5-Sonnet. The MT-R1-Zero further demonstrate promising generalizability across multilingual and low-resource settings. + +Extensive experiments further provide key findings and insight into the adaptation of R1-Zero paradigm to MT. First, we empirically demonstrate that the choice of metric reward plays a pivotal role in steering RL optimization and translation style (semantic or lexical) (Finding 1). Further analysis reveals that MT-R1-Zero induces diverse emergent reasoning patterns, including dynamic language-of-thought transition during translation (Findings 2 and 3). We also identify distinct RL adaptability of different base LLMs (Finding 4). Ablation studies suggest that the pure RL process alone can lead to substantial translation improvements, independent of thinking morbidity (Section 6). Our core contributions are as follows: + +- We present the first open-source implementation of the DeepSeek-R1-Zero paradigm for MT, achieving superior performance across indomain, OOD and generalization MT tasks. +- Our analysis reveals key findings and recipes for effective R1-Zero adaptation to MT, including reward metric selection, emergent reasoning patterns, training dynamics and LLM adaptability. +- Extensive experiments and ablations show that pure RL serves as the primary driver of MT improvements, with minimal dependence on forced reasoning or output length, highlighting the significant potential of RL for diverse translation applications and broader language tasks. + +# 2 Related Work + +LLM Reasoning with Post-training. Recent research indicates that scaling test-time computation can significantly enhance the ability of LLMs to tackle complex reasoning tasks (OpenAI, 2024; Zeng et al., 2024; Xiang et al., 2025). Many approaches rely on sophisticated techniques such as step-level process reward models (PRMs) that provide granular feedback (Lightman et al., 2024; Yuan et al., 2024; Snell et al., 2024) or MCTS to explore potential reasoning paths (Feng et al., 2023; Qi et al., 2024; Guan et al., 2025). A recent alternative, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025), demonstrated that large-scale pure RL, guided only by formatting rules and correctness of final predictions (rule-based reward), can motivate LLMs to develop self-emergent reasoning processes for complex reasoning tasks. Subsequent work (Hu et al., 2025; Face, 2025) successfully replicated this training paradigm in open-source + +models, focusing on mathematical domains. Xie et al. (2025) further demonstrated the effectiveness and generalization capabilities of the R1-Zero paradigm using logic reasoning game problems, while Huang et al. (2025) explored its potential for vision reasoning. Despite its potential, the application of the R1-Zero RL paradigm to complex generation tasks like MT, in which the accuracy/quality of outputs is not rule-based and difficult to validate automatically, remains an open question. + +LLM Reasoning for MT. Leveraging reasoning to improve MT has garnered increasing attention, as systematically explored in Chen et al. 
(2025) and Liu et al. (2025). Previous work have designed multi-step processes for MT, e.g., Feng et al. (2024b) introduced an API-based self-correcting framework, and Wang et al. (2024b) employed multi-task training followed by a multistage inference phase. Wang et al. (2024a) integrated a similar procedure into inference-time CoT, using a multi-agent mechanism to synthesize long CoT prompts for English-Chinese literary translation. Efforts have also focused on reward modeling for MT reasoning. Feng et al. (2025) constructed implicit process reward models for translation and explored their effectiveness when combined with test-time search. Recent study further evaluated explicit reasoning for MT using CoT fine-tuning and MCTS to expand test-time computation (Zhao et al., 2024). He et al. (2025) demonstrated that models can acquire reasoning-based translation capabilities through multi-stage training with manually constructed CoT templates. + +However, these existing methods often necessitate manually designed or synthetically generated structured CoT data, rely on complex search algorithms (MCTS), or require explicit multi-stage prompting (self-correction). The effectiveness of large-scale pure RL training paradigms such as R1-Zero remains unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art open-source or proprietary models. + +# 3 Method + +In this section, we present our method that trains a translation model with pure RL using a hybrid reward model. Unlike tasks with fixed correct answers, translation allows for multiple valid outputs, making the evaluation more complicated. In this work, we introduce a rule-metric mixed reward that integrates reasoning format checking with multi- + +ple translation quality assessment metrics, which is used within the Group Relative Policy Optimization (GRPO) (Shao et al., 2024) algorithm to ensure stable and efficient RL training. + +# 3.1 Rule-Metric Mixed Reward + +In RL, the reward is the main signal that drives model training. DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) employs simple rule-based rewards that check whether the final answer is correct and whether the response follows a specific format. This works well for tasks with fixed format correct answers such as math or coding. However, there is often no single "correct" output for MT, impeding the design of rule-based rewards. Fortunately, the MT community has developed many evaluation metrics to measure translation quality. Recent advancements in automated MT evaluation metrics have shown promise in aligning automated assessments with human translation quality judgments (Freitag et al., 2022, 2023). Thus, we design a rule-metric mixed reward, which consists of two parts: a Format Reward that checks output structure, and a Metric Reward that evaluates translation quality. We use a structured prompt template similar to that in DeepSeek-R1-Zero: + +# Template for MT-R1-Zero + +A conversation between User and Assistant. The User asks for a translation from {src_language} to {tgt_language}, and the Assistant solves it. The Assistant first thinks about the reasoning process in the mind and then provides the user with the final translation. The reasoning process and final translation are enclosed within and tags, respectively, i.e., reasoning process here final translation here . +User:{src_text} +Assistant: + +Here, src_language and tgt_language indicate the source and target languages, and src_text denotes the source text requiring translation. 
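To make the template concrete, the short Python sketch below assembles the prompt for one source sentence. It is an illustrative reconstruction, not the released implementation: the `<think>`/`<translate>` tag names are inferred from the tags referenced elsewhere in the paper, and the exact wording in the official repository may differ.

```python
# Illustrative reconstruction of the MT-R1-Zero prompt template.
# The <think>/<translate> tag names are inferred from the paper; check the
# released code (https://github.com/fzp0424/MT-R1-Zero) for the exact wording.

PROMPT_TEMPLATE = (
    "A conversation between User and Assistant. The User asks for a translation "
    "from {src_language} to {tgt_language}, and the Assistant solves it. The "
    "Assistant first thinks about the reasoning process in the mind and then "
    "provides the user with the final translation. The reasoning process and "
    "final translation are enclosed within <think> </think> and "
    "<translate> </translate> tags, respectively, i.e., <think> reasoning "
    "process here </think> <translate> final translation here </translate>.\n"
    "User:{src_text}\n"
    "Assistant: "
)

def build_prompt(src_language: str, tgt_language: str, src_text: str) -> str:
    """Fill the structured template for one source sentence."""
    return PROMPT_TEMPLATE.format(
        src_language=src_language,
        tgt_language=tgt_language,
        src_text=src_text,
    )

if __name__ == "__main__":
    print(build_prompt("English", "Chinese",
                       "A few more sensors in general would be nice I guess."))
```

In training, this string is what the policy model completes; the completion is then scored by the rewards described next.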
+

Format Reward: We use regular expression extraction to enforce a structured response format. The model is required to place its reasoning process within <think> </think> tags and provide the final translation inside <translate> </translate> tags. The format reward score $S_{\text{format}}$ is computed as:

$$
S_{\text{format}} = \begin{cases} 1, & \text{if the format is correct} \\ -1, & \text{if the format is incorrect} \end{cases}
$$

Metric Reward: This reward evaluates the quality of the model's translation, but only if the response format is correct. We use automatic evaluation metrics to calculate a translation quality score $S_{\text{metric}}$. We explore three approaches to compute $S_{\text{metric}}$:

1. N-gram Lexical Matching Reward (Reward-Lex): Metrics such as BLEU (Papineni et al., 2002) or chrF (Popovic, 2015) evaluate translation quality by measuring the difference (primarily lexical overlap) between the translation and the human-written reference. In our experiments, we employ the BLEU score calculated via sacrebleu$^{1}$.
2. Semantic and Contextual Reward (Reward-Sem): Learning-based metrics like COMET (Rei et al., 2020) and COMETKiwi (Rei et al., 2022) are trained on human judgments (e.g., MQM quality assessments (Freitag et al., 2021)). These metrics can recognize good translations even if the wording differs from the reference, as long as the meaning is preserved. We use COMETKiwi-23$^{2}$, which was used in WMT 24 (Kocmi et al., 2024) and only needs the source sentence and the model's translation.
3. Lexical and Semantic Mixed Reward (Reward-Mix): To capture both lexical fidelity and semantic adequacy, we use a hybrid reward (Reward-Mix) that adds together the Lexical Matching Reward (Reward-Lex) and the Semantic and Contextual Reward (Reward-Sem).

Accordingly, the computation of $S_{\text{metric}}$ depends on the selected reward configuration:

$$
S_{\text{metric}} = \begin{cases} \mathrm{B}(\text{trans}, \text{ref}), & \text{if Reward-Lex} \\ \mathrm{CK}(\text{src}, \text{trans}), & \text{if Reward-Sem} \\ \mathrm{B}(\text{trans}, \text{ref}) + \mathrm{CK}(\text{src}, \text{trans}), & \text{if Reward-Mix} \end{cases}
$$

where $\mathrm{B}$ denotes the normalized BLEU score, $\mathrm{CK}$ denotes the COMETKiwi score, trans is the generated translation, ref is the reference translation, and src is the source text.

Rule-Metric Mixed Reward: The final reward $r$ combines the format reward $S_{\text{format}}$ and the metric reward $S_{\text{metric}}$. Formally, it is calculated using the following rule:

$$
r = \begin{cases} S_{\text{format}} - 2, & \text{if } S_{\text{format}} = -1 \\ S_{\text{format}} + S_{\text{metric}}, & \text{if } S_{\text{format}} = 1 \end{cases}
$$

where $S_{\text{metric}}$ is calculated only if the response format is correct ($S_{\text{format}} = 1$). If the format is incorrect ($S_{\text{format}} = -1$), we skip the metric reward evaluation and assign a fixed penalty (yielding $r = -3$) to discourage format violations. This setup encourages the model to first learn the correct output structure. When the format is correct, the final reward becomes $r = 1 + S_{\text{metric}}$. Unlike traditional rule-based rewards that give a fixed score for correct outputs, our approach uses a continuous metric score, so the reward varies within the $[1, 2]$ or $[1, 3]$ range depending on translation quality; a minimal sketch of this computation is given below.
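The following Python sketch illustrates the rule-metric mixed reward under stated assumptions; it is not the released implementation. The helper names `format_score`, `extract_translation`, `metric_score`, `rule_metric_mixed_reward`, `bleu_fn`, and `kiwi_fn` are hypothetical, the regular expressions are illustrative, `bleu_fn` is assumed to wrap a sacrebleu-style score in [0, 100] (normalized here to [0, 1]), and `kiwi_fn` is assumed to return a COMETKiwi score in [0, 1].

```python
import re

def format_score(response: str) -> int:
    """S_format: +1 if the response is one <think>...</think> block followed by a
    <translate>...</translate> block, otherwise -1. (Illustrative check only;
    the released code may be stricter.)"""
    pattern = r"^\s*<think>.*?</think>\s*<translate>.*?</translate>\s*$"
    return 1 if re.match(pattern, response, flags=re.DOTALL) else -1

def extract_translation(response: str) -> str:
    """Pull the final translation out of the <translate> tags."""
    match = re.search(r"<translate>(.*?)</translate>", response, flags=re.DOTALL)
    return match.group(1).strip() if match else ""

def metric_score(src: str, trans: str, ref: str, mode: str, bleu_fn, kiwi_fn) -> float:
    """S_metric under the three reward configurations (Lex, Sem, Mix)."""
    bleu = bleu_fn(trans, ref) / 100.0   # normalized BLEU, B(trans, ref)
    kiwi = kiwi_fn(src, trans)           # CK(src, trans)
    if mode == "lex":
        return bleu
    if mode == "sem":
        return kiwi
    if mode == "mix":
        return bleu + kiwi
    raise ValueError(f"unknown reward mode: {mode}")

def rule_metric_mixed_reward(src: str, response: str, ref: str, mode: str,
                             bleu_fn, kiwi_fn) -> float:
    """Final reward r: fixed penalty for malformed outputs, otherwise
    1 + S_metric, so well-formed outputs score in [1, 2] or [1, 3]."""
    s_format = format_score(response)
    if s_format == -1:
        return s_format - 2              # r = -3 for format violations
    trans = extract_translation(response)
    return s_format + metric_score(src, trans, ref, mode, bleu_fn, kiwi_fn)
```

Passing the metric models in as callables keeps the sketch independent of any particular scoring library.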
As a result, the model receives more detailed feedback and can learn to improve even small differences in translation quality across correctly formatted outputs.

# 3.2 RL Algorithm

We use the Group Relative Policy Optimization (GRPO) algorithm (Shao et al., 2024) to train the translation model with our rule-metric mixed reward. In each training step, for a given translation query $q$, we sample a group of candidate outputs $\{o_1, o_2, \dots, o_G\}$ from the policy model $\pi_{\theta_{\mathrm{old}}}$. The advantage $A_i = \frac{r_i - \mathrm{mean}(\{r_1, r_2, \dots, r_G\})}{\mathrm{std}(\{r_1, r_2, \dots, r_G\})}$ is computed from the group of rule-metric mixed rewards $\{r_1, r_2, \dots, r_G\}$. GRPO then maximizes the following objective function to optimize $\pi_\theta$:

$$
\begin{aligned}
J_{\mathrm{GRPO}}(\theta) = \; & \mathbb{E}_{q \sim P(Q),\, \{o_i\}_{i=1}^{G} \sim \pi_{\theta_{\mathrm{old}}}(O \mid q)} \Bigg[ \frac{1}{G} \sum_{i=1}^{G} \min \bigg( \frac{\pi_{\theta}(o_i \mid q)}{\pi_{\theta_{\mathrm{old}}}(o_i \mid q)}\, A_i,\; \operatorname{clip}\Big( \frac{\pi_{\theta}(o_i \mid q)}{\pi_{\theta_{\mathrm{old}}}(o_i \mid q)},\, 1-\varepsilon,\, 1+\varepsilon \Big) A_i \bigg) \\
& \quad - \beta\, D_{\mathrm{KL}}\big( \pi_{\theta} \,\|\, \pi_{\mathrm{ref}} \big) \Bigg], \tag{1}
\end{aligned}
$$

where $\varepsilon$ and $\beta$ are hyperparameters controlling the PPO clipping threshold and the weight of the Kullback-Leibler (KL) divergence penalty (Schulman et al., 2017; Shao et al., 2024), respectively. Specifically, $\varepsilon$ determines the permissible range for policy updates, while $\beta$ regulates the magnitude of the KL penalty during training to prevent excessive policy shifts from the reference policy $\pi_{\mathrm{ref}}$ (typically the initialization of $\pi_{\theta}$). $D_{\mathrm{KL}}(\pi_{\theta} \,\|\, \pi_{\mathrm{ref}}) = \frac{\pi_{\mathrm{ref}}(o_i \mid q)}{\pi_{\theta}(o_i \mid q)} - \log \frac{\pi_{\mathrm{ref}}(o_i \mid q)}{\pi_{\theta}(o_i \mid q)} - 1$ is the KL divergence approximation term.

# 4 Experiments

# 4.1 Experimental Setup

Dataset and Benchmarks. Our primary experimental focus is on English (EN) and Chinese (ZH). Following Xu et al. (2023) and Feng et al. (2024a), we collect parallel examples $(\mathrm{EN} \rightleftharpoons \mathrm{ZH})$ sourced from WMT 2017 through WMT 2020. We apply a filter to exclude sentences containing fewer than 30 characters, leading to a final training set of 13,130 examples. For evaluation, we assess performance on two in-domain translation tasks using recent WMT benchmarks: EN-ZH (WMT 24$^{3}$) and ZH-EN (WMT 23$^{4}$). Additionally, we evaluate generalization capabilities on three out-of-distribution (OOD) translation directions: English-Japanese (EN-JA, WMT 2024), German-English (DE-EN, WMT 2023 Document-level), and German-Chinese (DE-ZH, Flores-200 (Costa-jussà et al., 2022)). Detailed statistics are presented in Table 8.

Baselines. Our primary baselines encompass leading proprietary models, namely Claude-3.5-Sonnet (Anthropic, 2024), GPT-4o (OpenAI, 2023), and Gemini-1.5-Pro (Team et al., 2024), alongside advanced open-source models such as the Qwen2.5 series (Yang et al., 2024), LLaMA-3.1 series (Grattafori et al., 2024), and the translation-specific Tower family (Alves et al., 2024). Proprietary models were accessed via their APIs$^{5}$. More evaluation details can be found in Appendix A.

Evaluation Metrics.
We assess translation quality using a suite of three complementary metrics: the lexical metric BLEU (Post, 2018), the reference-free learning-based metric COMETKiwi (Rei et al., 2022) (COMETKiwi-23-XL), and the reference-based learning-based metric XCOMET (Guerreiro et al., 2024) (XCOMET-XL). Together, these metrics provide a comprehensive view by evaluating both lexical fidelity and semantic adequacy. + +Training Details. Our implementation is based on + +the verl $^{6}$ framework. We selected the Qwen2.5-base series (3B and 7B parameter variants) as starting models for MT-R1-Zero training. During training, we configure a batch size of 8 and utilize 8 rollouts per prompt within the GRPO algorithm. We employ a constant learning rate of 5e-7 and set the sampling temperature to 1.0. The maximum generation length for responses is capped at 1024 tokens. We set the KL penalty coefficient $\beta$ to 0, thereby removing the KL constraint against the reference policy. This decision stems from our empirical observation that the KL penalty tends to restrict the model's exploration of diverse response lengths, which we will discuss further in Section 6.1. The PPO clipping range $\epsilon$ is set to 0.2. All models are trained for 1 epoch on 4 NVIDIA H800 80G GPUs for about 13 hours. + +# 4.2 Main Results + +In-Domain Performance. Our models show substantial gains over their corresponding base versions, and exhibit competing performance compared to existing SoTA benchmarks (Table 1). On the EN-ZH direction, our MT-R1-Zero-7B-Mix on the average score (62.25) also surpasses GPT-4o (61.86) and Qwen2.5-72B (61.77). In addition, the MT-R1-Zero-7B-Sem achieves the best semantic-level performance on EN-ZH, scoring 72.07 on COMETKiwi and 79.37 on XCOMET. This surpasses the strongest proprietary model, Claude3.5-Sonnet, by 1.68 COMETKiwi points and exceeds the best listed open-source model, Qwen2.5-72B, by more than 3 points. On the ZH-EN direction, MT-R1-Zero-7B-Mix is also highly competitive. Our MT-R1-Zero-7B-Sem achieves a COMETKiwi score of 71.66, which is comparable to the top closed models (Claude-3.5-Sonnet 71.69, GPT-4o 71.63) and surpasses strong open-source models such as LLaMA-3.1-70B (70.43) and Qwen2.5-72B (70.95). Furthermore, the MT-R1-Zero-3B-Sem delivers impressive performance for its scale. It scores 69.75 COMETKiwi on EN-ZH, which is approximately 1.7 points higher than the much larger LLaMA-3.1-70B and over 0.7 points above Qwen2.5-72B. + +Out-of-Distribution Performance. Table 2 reports the XCOMET of our models on OOD language pairs with a zero-shot setting (models trained only on EN-ZH/ZH-EN). Despite this challenging setup, our models exhibit strong generaliza + +
| MODEL | ZH-EN BLEU | ZH-EN COMETKiwi | ZH-EN XCOMET | ZH-EN Avg. | EN-ZH BLEU | EN-ZH COMETKiwi | EN-ZH XCOMET | EN-ZH Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **Closed** | | | | | | | | |
| Claude-3.5-Sonnet (2024/10) | 22.55 | 71.69 | 87.32 | 60.52 | 38.63 | 70.39 | 78.24 | 62.42 |
| GPT-4o (2024/08) | 22.57 | 71.63 | 87.22 | 60.47 | 41.13 | 69.01 | 75.43 | 61.86 |
| Gemini-1.5-Pro (2025/03) | 18.34 | 69.23 | 85.55 | 57.71 | 39.82 | 67.47 | 76.26 | 61.18 |
| **Open: General Purpose LLMs** | | | | | | | | |
| LLaMA-3.1-70B-Instruct | 25.19 | 70.43 | 86.21 | 60.61 | 39.82 | 68.05 | 75.17 | 61.01 |
| Qwen2.5-72B-Instruct | 21.96 | 70.95 | 87.07 | 59.99 | 39.29 | 69.04 | 76.97 | 61.77 |
| Qwen2.5-32B-Instruct | 20.54 | 69.35 | 85.47 | 58.45 | 36.36 | 68.43 | 74.90 | 59.90 |
| **Open: Translation-Specific LLMs** | | | | | | | | |
| TowerInstruct-13B-v0.1 | 24.72 | 70.17 | 85.69 | 60.19 | 37.06 | 66.22 | 73.13 | 58.80 |
| TowerInstruct-7B-v0.2 | 23.32 | 69.99 | 84.93 | 59.41 | 34.93 | 64.04 | 70.67 | 56.55 |
| **Ours** | | | | | | | | |
| Qwen2.5-3B-Base | 14.26 | 64.86 | 76.76 | 51.96 | 15.90 | 52.05 | 67.13 | 45.03 |
| MT-R1-Zero-3B-Lex | 21.53 | 66.33 | 81.69 | 56.52 | 33.70 | 60.58 | 65.67 | 53.32 |
| MT-R1-Zero-3B-Sem | 18.41 | 70.33 | 85.98 | 58.24 | 24.32 | 69.75 | 76.92 | 57.00 |
| MT-R1-Zero-3B-Mix | 22.54 | 68.84 | 84.08 | 58.49 | 36.27 | 65.05 | 72.10 | 57.81 |
| Qwen2.5-7B-Base | 18.23 | 68.27 | 84.99 | 57.16 | 31.14 | 63.38 | 69.83 | 54.78 |
| MT-R1-Zero-7B-Lex | 23.56 | 65.35 | 82.12 | 57.01 | 40.11 | 64.57 | 70.21 | 58.30 |
| MT-R1-Zero-7B-Sem | 16.62 | 71.66 | 86.07 | 58.12 | 23.07 | 72.07 | 79.37 | 58.17 |
| MT-R1-Zero-7B-Mix | 23.98 | 70.81 | 86.17 | 60.32 | 40.97 | 69.43 | 76.36 | 62.25 |
+ +Table 1: Performance comparison on in-domain translation directions (EN-ZH, ZH-EN) using BLEU, COMETKiwi, and XCOMET metrics, with average metric scores (Avg.). MT-R1-Zero variants (-Lex, -Sem, -Mix) are compared against closed and open baselines, which are further categorized by accessibility and specialization. The -Mix variant often achieves the best balance, while -Sem reaches peak semantic scores. + +
| MODEL | EN-JA | DE-EN (Doc) | DE-ZH | Avg. |
| --- | --- | --- | --- | --- |
| **Strong Baseline** | | | | |
| Qwen2.5-72B-Instruct | 76.86 | 89.51 | 88.42 | 84.93 |
| LLaMA3.1-70B-Instruct | 75.67 | 88.72 | 87.42 | 83.94 |
| **Same-size Baseline** | | | | |
| Qwen2.5-7B-Instruct | 63.74 | 87.45 | 84.43 | 78.54 |
| LLaMA-3.1-8B-Instruct | 64.50 | 86.84 | 82.23 | 77.86 |
| TowerInstruct-7B-v0.2 | 56.73 | 89.47 | 84.28 | 76.83 |
| MT-R1-Zero-7B-Lex | 60.65 | 85.25 | 83.86 | 76.59 |
| MT-R1-Zero-7B-Sem | 71.95 | 87.68 | 87.66 | 82.43 |
| MT-R1-Zero-7B-Mix | 68.49 | 88.69 | 88.69 | 81.96 |
+ +Table 2: Out-of-distribution performance comparison using the XCOMET metric on EN-JA, DE-EN (Document-level), and DE-ZH. + +tion. The MT-R1-Zero-7B-Sem achieves the highest average XCOMET score (82.43) across the OOD tasks, reaching top scores on EN-JA (71.95) and DE-EN (87.68). The MT-R1-Zero-7B-Mix also demonstrates highly competitive generalization with an average score of 81.96, and secures the highest score on DE-ZH (88.69). While these variants do not consistently surpass the much larger strong baselines (Qwen2.5-72B Avg. 84.93, LLaMA3.1-70B Avg. 83.94), they are still highly competitive. Crucially, MT-R1-Zero-7B-Sem and -Mix significantly outperform all same-size baselines (Qwen2.5-7B-Instruct Avg. 78.54, LLaMA + +3.1-8B-Instruct Avg. 77.86, TowerInstruct-7B-v0.2 Avg. 76.83) by a considerable margin (at least 3.4 points). These OOD results suggest that the quality improvements in MT-R1-Zero can effectively transfer to unseen language pairs. Results using COMETKiwi and BLEU are also provided in Appendix Tables 6 and 7, respectively. + +# 5 Key Findings and Insight + +Based on our extensive experiments adapting the R1-Zero paradigm to MT, we identify several key findings regarding the underlying mechanisms, design ideas, and emergent behaviors of our MT-R1-Zero framework. + +# 5.1 Impact of Reward Metric Selection + +As detailed in Section 3.1, we explore three metric rewards: Reward-Lex, Reward-Sem, and Reward-Mix. Our results demonstrate that the choice among these significantly affects the learning target and final model outputs, as stated in Finding 1. + +Finding 1: Reward metric selection critically shapes optimization targets and translation style. + +![](images/23089aaf58a392db50729b3ce583e9bf14970ec78ddace94058d3f25fd77b580.jpg) +Figure 2: Training dynamics using Reward-Lex, Reward-Sem, and Reward-Mix, evaluated with COMETKiwi, BLEU, and XCOMET. + +![](images/6c56dc9a984522cc0e741f0de6a2bac59cf804d50995fb0411cd7ea83591766e.jpg) + +![](images/b0f8de023d87072cb76332d6598f440181ff0dd4eb685e2fd493c511dac131f7.jpg) + +![](images/ba5a6d095eeb40acfd1aa4e6f18c36e661f2b8ecf8e4bc67ac76bd24ea907ff4.jpg) +Figure 3: Qualitative examples illustrates the effect of different reward functions (Reward-Lex, Reward-Sem, Reward-Mix) on EN-ZH translation, where the stylistic differences are driven by reward optimization (Finding 1). + +Figure 2 presents the training dynamics with different rewards. Training with Reward-Lex maximizes BLEU scores, often at the expense of semantic scores, while Reward-Sem maximizes COMETKiwi, leading to a decline in BLEU. Training with Reward-Mix improves both metrics, with a trade-off of achieving sub-optimal COMETKiwi compared to Reward-Sem. Independent evaluation with XCOMET further supports this finding, showing consistent improvements for Sem and Mix variants while fluctuating for Lex. This finding aligns with the insight from Chen et al. (2025), suggesting that lexical and semantic assessments are complementary, particularly for reasoning-oriented LLMs, and combining them can offer a more comprehensive evaluation signal. + +Qualitatively (Figure 3), this optimization alignment manifests as distinct translation styles. BLEU optimization encourages literal, n-gram focused translations, potentially sacrificing nuance. COMETKiwi optimization fosters translations that prioritize semantic faithfulness, even if lexically divergent from references. In contrast, the mixed reward yields balanced translations. 
This demonstrates that the metric reward fundamentally dictates the nature of the translation quality learned (e.g., semantic v.s. lexical). Therefore, careful metric selection and deliberate fusion are essential for tailoring RL-based MT refinement towards specific and desired translations. + +![](images/d091b63c34fab3acdd1a2db73581403ba0aaffa3a30b26bc7b4f08eb9981e3cc.jpg) +Figure 4: Training dynamics of MT-R1-Zero models (using Reward-Sem). Left: COMETKiwi score progression for 3B and 7B models on EN-ZH and ZH-EN test sets. Right: Average response length changes over training steps, exhibiting the classic decrease-then-increase pattern (Finding 2). + +![](images/1b66e6c2a22b2c8383e6ce327aa237f85185f220afb86525d8f7223f066d8e8c.jpg) +Figure 5: Evolution of an MT-R1-Zero model's reasoning process and translation output for the Chinese source text "其影响可能类似于2008年的经济危机" at different training steps (0, 400, 1600), showcasing the shift from decomposition to more semantic analysis (Finding 2). + +Source (ZH):至于电量的详细说明,一般人看着心里会发虚。 + +Reference (EN):The majority of individuals will feel guilty after reading the in-depth description of the power. + +# Step 0 + +User:至于电量的详细说明,一般人看着心里会发虚。 + +Assistant: To translate the given Chinese sentence into English, I will first break down the sentence into its components and then find the appropriate English equivalents for each component. The sentence is: "至于电量的详细说明,一般人看着心里会发虚。" + +1. 至于 - This is a preposition that can be translated to "as for" or "regarding." + +2. 电量 - This is a compound noun that means "electricity" or "power." + +3. 的 - This is a possessive particle that indicates possession or relationship. + +4. 详细说明 - This is a compound noun that means "detailed explanation" or "explanation in detail." + +5. 一般 - This is an adverb that means "generally" or "usually." + +6. 人 - This is a noun that means "people" or "persons." + +7. 看着 - This is a verb phrase that means "looking at" or "seeing." + +8. 心里 - This is a compound noun that means "in one's heart" or "in one's mind." + +9. 会 - This is a modal verb that means "will" or "would." + +10. 发虚 - This is a verb phrase that means "to feel faint" or "to feel dizzy." + +Now, I will combine these components to form the English translation. As for the detailed explanation + +of electricity, generally speaking, people will feel faint when they see it. + +[COMETKiwI:60.11] + +# Step 400 + +User:至于电量的详细说明,一般人看着心里会发虚。 + +Assistant: My task is to accurately translate the Chinese text provided by the user into natural and coherent English while preserving the original meaning, tone, and nuances of the Chinese sentence. As for the detailed explanation of the battery, most people would feel uneasy just by looking at it. [COMETKwi: 68.44] + +# Step 1600 + +User:至于电量的详细说明,一般人看着心里会发虚。 + +Assistant: I translated the Chinese text "User:至于电量的详细说明,一般人看着心里会发虚。" to English as "As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it." The original text quotes the user saying that when people see the detailed information about the battery level, they feel a bit uncomfortable or uncertain about it.translate> "As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it."
[COMETHiwi: 74.89] + +# 5.2 Emergence and Evolution of Translation Thinking Patterns + +By observing the training process, we provide several insights into model adaptation and the emergence of reasoning. + +Finding 2: Response length initially declines rapidly and then gradually increases as training progresses. + +Figure 4 (Right) depicts the pattern in Finding 2 alongside consistent COMETKiwi improvements (Left). Qualitative analysis (Figure 5) reveals that this length trajectory reflects evolving reasoning + +strategies. The initial decline corresponds to the model mastering the required format while transitioning from naive decomposition (Step 0) to more efficient, direct translations. The subsequent increase aligns with the development of richer semantic analysis and deeper contextual reasoning within the tags (Step 1600). + +Finding 3: Diverse reasoning patterns emerge autonomously, varying in style and complexity, and moreover, the internal reasoning language could dynamically transit to target languages even for OOD settings. + +User:A few more sensors in general would be nice I guess. + +Assistant: I will translate the given English sentence into Japanese. The sentence is "A few more sensors in general would be nice I guess." I will break down the sentence into its main components and find the appropriate Japanese words and phrases for each part. + 一般、少让更多的SENSAがた或良好を意思檔。 + +# EN-ZH + +User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool + +Assistant: I will translate the given text from English to Chinese. The text contains hashtags and mentions of a pilot training program, flying, a company called Teagan Air, a specific aircraft model (PA28), and a pilot. I will translate each part of the text accordingly. + #飞行员培训 #飞行 #泰根航空 #PA28 #飞行员 #飞行学校 + +# DE-ZH + +User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Nähe eines Anspitzers kommt zu müssen. + +Assistant: To translate the given German sentence into Chinese, I will first break down the sentence into its main components and then find the appropriate Chinese equivalents for each component. The main components are: "Heutzutage" (today), "schreiben" (write), "Menschen" (people), "Nachrichten" (messages), "auf Computerbildschirmen" (on computer screens), "ohne je" (without ever), "in die Näre" (near), "eines Anspitzers" (a pencil), and "kommen zu müssen" (to have to come). + 现在,人们可以在电脑屏幕上写信,而无需接近铅笔。 + +User:A few more sensors in general would be nice I guess. + +Assistant: 我はルーダーから、 [User: あ、一般にルーダーが減少増えたARDS']」う英語の文を翻訹てほしぃと類まんた。その文は、ルーダーがルーダーの数を増やてほしぃと顧ったとを表語言。私はこの文を本日語に翻訹てまん。「あ、一般にルーダーをむう減少増やてむいかな」」う意味です。 「一般に、ルーダーをむう減少増やてむいかな。」 + +User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool + +Assistant: 我在思考如何将这段英文文本“#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool"翻译成中文。文本中包含多个标签,这些标签可能来自社交媒体帖子或文章,用于描述与飞行员培训、飞行、TeaganAir航空公司、PA28飞机型号、飞行员、飞行学校等相关的内容。翻译时,我会将每个标签逐一翻译成中文,保留其原始含义和用途。翻译后的文本内容如下: + +#飞行员培训 #飞行 #泰根航空 #PA28型飞机 #飞行员 #飞行学校 +#飞行员培训 #飞行 #泰根航空#PA28型飞机 #飞行员 #飞行学校 + +User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen. 
+ +Assistant: 我在思考如何将这段德文文本 "Heutzzutege schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen."翻译成中文。文本中用户引用了一段话,指出如今人们可以在电脑屏幕上撰写消息,而无需接近任何针状物。原文指出,现在人们可以通过电脑屏幕发送消息,而不需要亲自接触到针状物(可能是指针式打字机或其他类似设备)。这段文本可能是在比较现代的数字通信方式与传统的打字或书写方式的差异。 +"如今,人们可以在电脑屏幕上编写消息,而无需接近任何针状物。 + +Figure 6: Examples illustrating language-of-thought phenomenon, i.e., transition of the internal reasoning language in MT-R1-Zero models. The reasoning language transits from English at Step 0 to target language at Step 1600, indicated by bold text across various OOD test pairs (Finding 3). + +As R1-Zero-like training typically lacks a cold-start (DeepSeek-AI et al., 2025; Huang et al., 2025) phase with predefined reasoning examples, the observed thinking processes should be emergent and shaped by the RL objective. Our framework incentivizes a variety of reasoning styles within the tags (Figure 12). In particular, we observe patterns ranging from structured multi-step decomposition (Types I-III) to more colloquial processing (Types IV-V). While some instances include explicit "review/refine" steps, these generally appear as pre-planned components rather than the conversational, iterative self-correction characteristic of the "Aha moment" reported in mathematical reasoning tasks (DeepSeek-AI et al., 2025; Xie et al., 2025; Hu et al., 2025). This suggests that while MT-R1-Zero successfully encourages thinking, the complexity and specific nature of emergent reasoning are task-dependent. + +Furthermore, we observe a striking and interesting "language-of-thought" (transition in the language used for internal reasoning) phenomenon during OOD testing (Figure 6). While base models often use English as default thinking language based on template, MT-R1-Zero models progressively transit to utilize the target language of the translation task for their reasoning process within + +the $<$ think $>$ $\angle$ /think> block during training (see bold Japanese or Chinese text in step 1600). This dynamic adaptation of the internal "language of thought", conditioned on the task, emerges even without direct supervision on reasoning language. + +# 5.3 Training Dynamics of Different LLMs + +The effectiveness and training behavior of MT-R1-Zero are significantly influenced by the base LLM architecture and its initial state (pre-trained vs. instruction-tuned). We compare models from three distinct families: general-purpose (Qwen2.5 series $^{7}$ , LLaMA-3.1 series $^{8}$ ) and translation-specific (Tower family $^{9}$ ). For each model family, we include both the pre-trained base model and the corresponding instruction-finetuned variant, adapting their chat templates for the Instruct models. + +![](images/af5d7b3938351799e5ac948de228b3fbb521d21706b10eec2d5ab3231e32afd1.jpg) +Figure 7: Comparison of training dynamics for different model families (Qwen2.5, LLaMA-3.1, Tower) undergoing MT-R1-Zero RL training, highlighting differences in adaptability (Finding 4). + +![](images/c68080def505d3b55351519d62c72fbac53f65b34aaeb4005232f5ce0d4e46b6.jpg) +Figure 8: Qualitative comparison of final outputs from different starting models trained with MT-R1-Zero for the Chinese input "好看,但有点点小遗憾", illustrating varying degrees of format adherence and reasoning generation, including format hacking by some models (Finding 4). 
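The language-of-thought shift in Figure 6 is reported qualitatively. As a purely illustrative aside, and not an analysis performed in this paper, one simple way such a shift could be quantified is by measuring the share of CJK or kana characters inside the reasoning span over training; the helper names below are hypothetical.

```python
import re
import unicodedata

def think_span(response: str) -> str:
    """Return the text inside the <think>...</think> block, if any."""
    match = re.search(r"<think>(.*?)</think>", response, flags=re.DOTALL)
    return match.group(1) if match else ""

def cjk_ratio(text: str) -> float:
    """Fraction of alphabetic characters from CJK/kana scripts, a rough proxy
    for reasoning in Chinese or Japanese rather than English."""
    letters = [ch for ch in text if ch.isalpha()]
    if not letters:
        return 0.0
    cjk = [ch for ch in letters
           if "CJK" in unicodedata.name(ch, "")
           or "HIRAGANA" in unicodedata.name(ch, "")
           or "KATAKANA" in unicodedata.name(ch, "")]
    return len(cjk) / len(letters)

# A rising cjk_ratio(think_span(r)) across checkpoints would indicate that the
# model increasingly "thinks" in the target language, as observed in Figure 6.
```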
+ +![](images/c383c95349861f941ebf06b185adff0fab2153b9e73c7a737770d34165272b50.jpg) + +# Reference (EN): It's encouraging to see, although there is some disappointment + +# Qwen2.5-7B-Base + +User:好看,但有点点小遗憾 + +Assistant: I translated the Chinese text "User:好看,但有点点小遗憾" to English as "It looks good, but there is a tiny bit of disappointment." The user is saying that the object or situation being discussed appears nice, but there is a small aspect that they are not completely satisfied with. + + "It looks good, but there is a tiny bit of disappointment. + +# LLaMA-3.1-8B-Base + +User:好看,但有点点小遗憾 + +Assistant: reasoning to generate translation "It's beautiful, but there are a few minor drawbacks." + +# Qwen2.5-7B-Instruct + +user 好看,但有点点小遗憾 +assistant The user is asking to translate the Chinese phrase "好看,但有点点小遗憾" to English. Let's break down the phrase: + +- "好看" means "it looks good" or "it is attractive." +- "但" is a conjunction that means "but." +- "有点点小遗憾" means "there is a slight bit of regret" or "there is a small sense of disappointment." + +Putting it all together, the phrase suggests that something looks good, but there is a small element of disappointment or regret. A possible translation could be: "It looks good, but there's a tiny bit of disappointment." + +To ensure the translation captures the nuance, we can refine it to: "It looks good, but there's a slight sense of regret." "It looks good, but there's a slight sense of regret." + +# LLaMA-3.1-8B-Instruct + +user
+好看,但有点点小遗憾 + + assistant + + "It's nice, but there's a little bit of a disappointment." "It's nice, but there's a little bit of a disappointment." + +# TowerBase-7B + +User:好看,但有点点小遗憾 + +Assistant: Reasoning + + +Translation + + + +# TowerInstruct-7B + +user好看,但有点点小遗憾 + + assistant 好看,但有点点小遗憾 + + It was beautiful, but there was a small disappointment: + +Finding 4: LLM architectures exhibit distinct adaptability and effectiveness under MT-R1-Zero, with Qwen showing the highest compatibility in format learning and reasoning generation, while LLaMA and Tower face more challenges and tend towards "format hacking". + +As shown in Figure 7, both the translation-specific (Tower) and LLaMA-3.1 models exhibit significantly slower adaptation to the required / format compared to Qwen models, as evidenced by their delayed format error reduction. Furthermore, qualitative analysis (Figure 8) reveals that these models often + +circumvent meaningful reasoning by generating minimal or templated placeholder content in the tags, potentially "hacking" the format reward. In contrast, Qwen2.5 models demonstrate stronger adaptability, consistently producing coherent reasoning text within the structured framework. This suggests that architectures like Qwen may possess inherent advantages for integrating structured reasoning via RL, a finding that aligns with prior work on cognitive behaviors in related domains (Gandhi et al., 2025). However, even Qwen2.5 models occasionally regress to simplistic one-sentence outputs during reasoning tasks, underscoring the instability of exploration in R1-Zero-like training paradigms. + +
| Model | ZH-EN COMETKiwi | ZH-EN XCOMET | EN-ZH COMETKiwi | EN-ZH XCOMET | EN-JA COMETKiwi | EN-JA XCOMET | DE-ZH COMETKiwi | DE-ZH XCOMET | DE-EN (Doc) COMETKiwi | DE-EN (Doc) XCOMET |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-7B (SFT) | 69.29 | 84.80 | 67.25 | 74.29 | 67.77 | 65.39 | 67.01 | 86.17 | 67.44 | 86.74 |
| Qwen2.5-7B (RL w/o thinking) | 70.78 | 86.26 | 69.62 | 76.03 | 68.68 | 68.77 | 67.84 | 86.67 | 68.31 | 88.30 |
| Qwen2.5-7B (RL w/ thinking) | 70.81 | 86.17 | 69.43 | 76.36 | 69.27 | 68.49 | 68.74 | 88.69 | 68.74 | 88.69 |

ZH-EN and EN-ZH are the in-domain directions; EN-JA, DE-ZH, and DE-EN (Doc) are out-of-distribution.
+ +Table 3: Performance comparison of different training paradigms: Supervised Fine-Tuning (SFT) vs. RL with explicit thinking (RL w/ thinking) vs. RL without explicit thinking (RL w/o thinking). Results shown for in-domain and out-of-distribution tasks support the finding that the RL process itself is the primary driver of gains (Section 6). + +
| MODEL | BLEU | COMETKiwi-22 | XCOMET | Avg. |
| --- | --- | --- | --- | --- |
| Qwen2.5-7B-Instruct | 24.17 | 69.66 | 61.84 | 51.89 |
| TowerInstruct-13B | 22.71 | 70.55 | 62.77 | 52.01 |
| DRT-7B | 35.51 | 71.77 | 68.40 | 58.56 |
| DRT-14B | 36.37 | 72.15 | 69.64 | 59.39 |
| Qwen2.5-7B (SFT) | 21.61 | 69.91 | 63.20 | 51.57 |
| Qwen2.5-7B (RL w/o thinking) | 28.44 | 72.92 | 66.17 | 55.84 |
| Qwen2.5-7B (RL w/ thinking) | 28.42 | 73.20 | 66.64 | 56.09 |
+ +Table 4: Performance comparison on the DRT literature translation dataset (Wang et al., 2024a) using BLEU, COMETKiwi-22, and XCOMET metrics. + +# 6 Analysis and Ablation + +# 6.1 KL Penalty Constrains Response Length but Not Quality Gains + +We investigate the effectiveness of the KL term in the GRPO objective (Equation 1) on response length and translation quality, as it would regularize the policy by discouraging large deviations from the initial reference model. We conducted experiments without the KL penalty (setting $\beta = 0$ , Figure 9), and found that the average response length, after an initial drop, began to fluctuate and trend upward during training. This pattern is consistent with R1-Zero-like results in mathematical tasks (Yu et al., 2025; Yeo et al., 2025). Additional ablation of the KL penalty with COMETKiwi reveals that the improvement of translation quality appears to be largely independent of the thinking vocabulary. Significant quality gains were achieved in early-stage training (e.g., before Steps 400) before a substantial increase in response length, even in experiments conducted without the KL penalty. This suggests that performance improvements in the MT-R1-Zero setup could not be attributed solely or primarily to increasing reasoning vocabulary. + +# 6.2 Disentangling RL and Explicit Thinking + +To determine whether performance gains stem primarily from the explicit step or the underlying RL optimization, we conducted an ablation study comparing three training paradigms using the similar setup from Section 4.1: 1) Supervised Fine + +![](images/0a16468e5c467f1eb3ed748065d44da44f367778442a612e837a87eaa1935712.jpg) + +![](images/e55e14eebe1ca2bdc590ae8036f91c559c49fa5d52dfe0db21c613c1ce9d1660.jpg) +Figure 9: Effect of the KL divergence penalty on EN-ZH COMETKiwi score and response length progression for models trained with (w/ KL, $\beta = 0.01$ ) and without (w/o KL, $\beta = 0$ ) the penalty. Experiments are conducted three times with MT-R1-Zero-7B-Sem. + +Tuning (SFT): The same base model is fine-tuned on the parallel data using LLaMA-Factory (Zheng et al., 2024), establishing a non-RL baseline. 2) RL w/ thinking (MT-R1-Zero-Sem): The model is trained with the rule-metric mixed reward (Format Reward and Reward-Sem) while enforcing explicit / structure generation. 3) RL w/o thinking: The model is trained with RL-zero optimization (Reward-Sem) solely to the final output, with no constraints on explicit step generation. See Appendix B for more details. + +The results are presented in Table 3. It reveals that the "RL w/o thinking" variant achieves performance comparable to MT-R1-Zero ("RL w/ thinking") across both in-domain and OOD tasks, while both RL configurations substantially outperform + +![](images/02ab532ad654b385575826e4ea322358bc806133d469827fce79440c3868a45c.jpg) + +![](images/cc80f22abd648b0263c9273ccf57a87fad8cc6b763268813c8dafc1c7cc14e83.jpg) + +![](images/71511929065aefb7be9ca6b4f5a1077fa0d62bedbe3d5c23ded80cd558ab11c9.jpg) + +![](images/0757a8c88b15053894a803861ac938b15aece754b128ca6c399bcfc30873b808.jpg) + +![](images/d80cadc9e8ea52aa5536f11d8ae0db66b4eea2ed0b6e2458c56434863a1b4d6f.jpg) +Figure 10: Training progression (COMET-22) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets, demonstrating applicability in multilingual settings (Section 6.3). 
+ +![](images/7ac057682468991acec0a5725954084977bdbadd88258f7bb7ab1f266d8066bf.jpg) + +![](images/e93f56af0eca6a1c191d82bec561e582d8affcd116cfbe1b06aeac5792b12def.jpg) + +![](images/2b806a900a9478d666e9030e40c0b37feece8287689e4230effea1f0e3383eb4.jpg) + +the SFT baseline – particularly in OOD settings. This pattern is further corroborated by evaluations on the DRT test set (Table 4), a literature translation benchmark (Wang et al., 2024a), where we again observe marginal differences between RL variants but significant gains over SFT. These findings demonstrate that while the tag could facilitate emergent reasoning patterns, the major performance improvements in MT-R1-Zero are primarily from the RL framework itself. This aligns with the intuition that online RL methods, iteratively sampling and evaluating self-generated outputs against quality metrics, principally learn "how to translate" that surpass SFT's behavior cloning limitations. + +# 6.3 Multilingual and Low-Resource Support + +To evaluate the broader applicability of our framework, we examine its effectiveness in multilingual training scenarios and its potential benefits for low-resource languages. We train multilingual MT-R1-Zero models using the Germanic language data split in the X-ALMA (Xu et al., 2024), augmented with Chinese (see Table 9 for detailed data statistics). We set the batch size to 16 and used COMET $22^{10}$ as the metric reward (Reward-Sem), consistent with the evaluation protocols in X-ALMA. All models are trained for 1 epoch on 16 NVIDIA H800 80G GPUs for about 12 hours. All other hyperparameters follow the configuration described in Section 4.1. The training progress, measured by + +COMET-22 for English-to-target directions, is depicted in Figure 10. We also report the XCOMET progression in Figure 11. + +The learning curves demonstrate consistent improvement in translation quality across languages spanning diverse resource levels, including those typically considered low-resource (e.g., Icelandic (IS) and Norwegian (NO)). The steady performance improvement observed throughout training confirms that the MT-R1-Zero framework remains effective when applied in multilingual settings. + +# 7 Conclusion + +In this work, we introduced MT-R1-Zero, the first successful adaptation of R1-Zero RL framework to MT using a novel rule-metric mixed reward mechanism that combines format enforcement with quality metrics. Our MT-R1-Zero significantly improves translation quality, achieving leading results on multiple benchmarks, i.e., our 3B models compete with much larger open-source models, while our 7B models are on par with advanced proprietary models. The MT-R1-Zero also demonstrates strong OOD generalization and multilingual applicability. Through extensive experiments and analysis, we highlight the significant impact of reward metric choice for optimization, showcase distinct adaptability across different LLMs, and reveal that performance gains are principally from the RL process itself rather than reasoning steps or morbidity, establishing R1-Zero as a viable and potent paradigm for advancing MT. More broadly, our work high- + +lights the great potential of RL for diverse language processing tasks beyond translation. + +# Limitations + +While MT-R1-Zero represents a significant advance, certain limitations remain. The emergent reasoning observed, though diverse, did not achieve the sophisticated iterative self-correction capabilities demonstrated in mathematical reasoning tasks using similar RL or R1-like methods. 
This discrepancy may reflect fundamental differences in task structure or indicate the need for specialized design in translation tasks. One promising direction would be developing task-specific cold-start datasets for SFT before RL optimization, though this would deviate from the pure RL paradigm we investigated here. Future work could focus on inducing deeper reasoning structures specifically beneficial for the MT task, investigating architectural adaptability across a broader range of LLMs, and developing more appropriate reward mechanisms. Exploring applications to specialized domains (e.g., law and healthcare) and general language processing tasks presents promising opportunities to extend this work. + +# References + +Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. 2024. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733. +Anthropic. 2024. Claude 3.5 sonnet. +Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. 2025. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544. +Marta R Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, et al. 2022. No language left behind: Scaling human-centered machine translation. arXiv preprint arXiv:2207.04672. +Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. 2025. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456. +DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, + +Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang W. L. Xiao Wei An Xiaodong Liu Xiaohan Wang Xiaokang Chen Xiaotao Nie, Xin Cheng Xien Liu Xie Xingchao Liu Xinyu Yang Xinyuan Li Xuecheng Su Xuheng Lin X.Q.Li Xiangyue Jin Xiaojin Shen Xiaosha Chen Xiaowen Sun Xiaoxiang Wang Xinnan Song Xinyi Zhou Xianzu Wang Xinxia Shan Y.K. 
+Hugging Face. 2025. Open r1: A fully open reproduction of deepseek-r1. +Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179. +Zhaopeng Feng, Ruizhe Chen, Yan Zhang, Zijie Meng, and Zuozhu Liu. 2024a. Ladder: A model-agnostic framework boosting LLM-based machine translation to the next level. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15377-15393, Miami, Florida, USA. Association for Computational Linguistics. +Zhaopeng Feng, Jiahan Ren, Jiayuan Su, Jiamei Zheng, Zhihang Tang, Hongwei Wang, and Zuozhu Liu. 2025. Mt-rewardtree: A comprehensive framework for advancing llm-based machine translation via reward modeling. arXiv preprint arXiv:2503.12123. +Zhaopeng Feng, Yan Zhang, Hao Li, Wenqiang Liu, Jun Lang, Yang Feng, Jian Wu, and Zuozhu Liu. 2024b. Improving llm-based machine translation with systematic self-correction. arXiv preprint arXiv:2402.16379. +Markus Freitag, George Foster, David Grangier, Viresh Ratnakar, Qijun Tan, and Wolfgang Macherey. 2021. Experts, errors, and context: A large-scale study of human evaluation for machine translation. Transactions of the Association for Computational Linguistics, 9:1460-1474. +Markus Freitag, Nitika Mathur, Chi-kiu Lo, Eleftherios Avramidis, Ricardo Rei, Brian Thompson, Tom Kocmi, Frederic Blain, Daniel Deutsch, Craig Stewart, Chrysoula Zerva, Sheila Castilho, Alon Lavie, and George Foster. 2023. Results of WMT23 metrics shared task: Metrics might be guilty but references are not innocent. In Proceedings of the Eighth Conference on Machine Translation, pages 578-628, Singapore. Association for Computational Linguistics. +Markus Freitag, Ricardo Rei, Nitika Mathur, Chi-kiu Lo, Craig Stewart, Eleftherios Avramidis, Tom Kocmi, George Foster, Alon Lavie, and André F. T. Martins. 2022. Results of WMT22 metrics shared task: Stop using BLEU – neural metrics are better and more robust. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 46–68, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics. +Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. 2025. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307. +Aaron Grattafiori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
+Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519. +Nuno M Guerreiro, Ricardo Rei, Daan van Stigt, Luisa Coheur, Pierre Colombo, and Andre FT Martins. + +2024. xcomet: Transparent machine translation evaluation through fine-grained error detection. Transactions of the Association for Computational Linguistics, 12:979-995. +Minggui He, Yilun Liu, Shimin Tao, Yuanchang Luo, Hongyong Zeng, Chang Su, Li Zhang, Hongxia Ma, Daimeng Wei, Weibin Meng, et al. 2025. R1-t1: Fully incentivizing translation capability in llms via reasoning learning. arXiv preprint arXiv:2502.19735. +Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. 2025. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero. +Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. 2025. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749. +Tom Kocmi, Eleftherios Avramidis, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Markus Freitag, Thamme Gowda, Roman Grundkiewicz, et al. 2024. Preliminary wmt24 ranking of general mt systems and llms. arXiv preprint arXiv:2407.19884. +Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2024. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*. +Sinuo Liu, Chenyang Lyu, Minghao Wu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2025. New trends for modern machine translation with large reasoning models. arXiv preprint arXiv:2503.10351. +Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. 2024. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592. +OpenAI. 2023. GPT-4: technical work. +OpenAI. 2024. Introducing openai o1. https://openai.com/o1/. Accessed: 2024-10-02. +Kishore Papineni, Salim Roukos, Todd Ward, and Wei Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics. +Maja Popovic. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392–395, Lisbon, Portugal. Association for Computational Linguistics. + +Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Brussels, Belgium. Association for Computational Linguistics. +Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 2024. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195. +Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. Comet: A neural framework for mt evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2685-2702. 
+Ricardo Rei, Marcos Treviso, Nuno M Guerreiro, Chrysoula Zerva, Ana C Farinha, Christine Maroti, José GC De Souza, Taisiya Glushkova, Duarte Alves, Luísca Coheur, et al. 2022. Cometkiwi: Ist-unbabel 2022 submission for the quality estimation shared task. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 634-645. +John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300. +David Silver, Aja Huang, Chris J. Maddison, Arthur Guez, L. Sifre, George van den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Vedavyas Panneershelvam, Marc Lanctot, Sander Dieleman, Dominik Grewe, John Nham, Nal Kalchbrenner, Ilya Sutskever, Timothy P. Lillicrap, Madeleine Leach, Koray Kavukcuoglu, Thore Graepel, and Demis Hassabis. 2016. Mastering the game of go with deep neural networks and tree search. Nature, 529:484-489. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters arXiv preprint arXiv:2408.03314. +Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530. +Kimi Team. 2025a. Kimi k1.5: Scaling reinforcement learning with llms. +Qwen Team. 2025b. Qwq-32b: Embracing the power of reinforcement learning. + +Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. 2024a. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498. +Yutong Wang, Jiali Zeng, Xuebo Liu, Fandong Meng, Jie Zhou, and Min Zhang. 2024b. Taste: Teaching large language models to translate through self-reflection. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6144-6158. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837. +Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Balak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. 2025. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682. +Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768. +Haoran Xu, Young Jin Kim, Amr Sharaf, and Hany Hassan Awadalla. 2023. A paradigm shift in machine translation: Boosting translation performance of large language models. arXiv preprint arXiv:2309.11674. +Haoran Xu, Kenton Murray, Philipp Koehn, Hieu Hoang, Akiko Eriguchi, and Huda Khayrallah. 2024. X-alma: Plug & play modules and adaptive rejection for quality translation at scale. arXiv preprint arXiv:2410.03115. 
+An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115. +Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373. +Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476. +Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. 2024. Free process rewards without process labels. arXiv preprint arXiv:2412.01981. + +Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135. + +Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2024. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405. + +Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. Llamafactory: Unified efficient finetuning of 100+ language models. arXiv preprint arXiv:2403.13372. + +# A Evaluation Details + +When evaluating model performance on the test set, we deploy open-source models locally using frameworks such as vLLM or HuggingFace Transformers. We use a sampling decoding strategy with temperature 0.2 and top_p 0.95, and cap the maximum generation length at 1024 tokens. We adopt the prompt shown in Table 5 to sample translations (applying the model-specific chat template when needed). + +# B SFT Training Details + +For the Supervised Fine-Tuning (SFT) baseline compared in the ablation study (Section 6.2), we utilized LLaMA-Factory (Zheng et al., 2024). The SFT process started from the same base model architecture as the corresponding RL experiments (e.g., Qwen2.5-7B) and was performed on the identical parallel translation dataset (13,130 examples from WMT 2017-2020 after filtering, detailed in Section 4.1). The model was fine-tuned on 8 NVIDIA H800 80G GPUs for 2 epochs using a learning rate of 5e-6 and a batch size of 64, totaling approximately 400 training steps.
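For illustration, a comparable full-parameter SFT run can be expressed as a LLaMA-Factory configuration along the following lines. This is a minimal sketch: the dataset name, checkpoint path, and output directory are placeholders rather than the authors' released configuration, and the dataset would first need to be registered in LLaMA-Factory's `dataset_info.json`.

```python
# Hypothetical LLaMA-Factory config mirroring the SFT baseline in Appendix B.
# Global batch size 64 = 8 GPUs x 4 per-device x 2 gradient-accumulation steps,
# which with 13,130 examples and 2 epochs gives roughly 400 optimizer steps.
import subprocess
import yaml  # PyYAML

sft_config = {
    "model_name_or_path": "Qwen/Qwen2.5-7B",   # same base model as the RL runs
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
    "dataset": "wmt17_20_en_zh",               # placeholder dataset name
    "template": "qwen",
    "cutoff_len": 1024,
    "per_device_train_batch_size": 4,
    "gradient_accumulation_steps": 2,
    "learning_rate": 5.0e-6,
    "num_train_epochs": 2.0,
    "bf16": True,
    "output_dir": "saves/qwen2.5-7b-mt-sft",   # placeholder output path
}

with open("mt_sft.yaml", "w") as f:
    yaml.safe_dump(sft_config, f)

# LLaMA-Factory exposes a CLI entry point for training from a YAML config.
subprocess.run(["llamafactory-cli", "train", "mt_sft.yaml"], check=True)
```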
| Inference Prompt |
| --- |
| Translate the following text from {src_language} into {tgt_language}. {src_language}:{src_text} {tgt_language}: |
+ +Table 5: Prompt used for translation generation. {tgt_language}: target language; {src_language}: source language; {src_text}: the source test sentence.
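To make the decoding setup of Appendix A concrete, the sketch below fills the Table 5 prompt and samples with vLLM (temperature 0.2, top_p 0.95, at most 1024 generated tokens). The checkpoint path and the example sentence are placeholders; instruction-tuned models would additionally need their chat template applied to the prompt.

```python
# Minimal sketch of the evaluation-time decoding described in Appendix A.
from vllm import LLM, SamplingParams

PROMPT = ("Translate the following text from {src_language} into {tgt_language}. "
          "{src_language}:{src_text} {tgt_language}:")

def build_prompt(src_language: str, tgt_language: str, src_text: str) -> str:
    # Table 5 prompt; wrap with the model's chat template for chat/instruct models.
    return PROMPT.format(src_language=src_language,
                         tgt_language=tgt_language,
                         src_text=src_text)

llm = LLM(model="path/to/mt-r1-zero-7b")  # placeholder checkpoint path
params = SamplingParams(temperature=0.2, top_p=0.95, max_tokens=1024)

prompts = [build_prompt("English", "Chinese", "It's not that bad, right?")]
for output in llm.generate(prompts, params):
    print(output.outputs[0].text)
```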
| MODEL | EN-JA | DE-EN (Doc) | DE-ZH | Avg. |
| --- | --- | --- | --- | --- |
| *Strong Baseline* | | | | |
| Qwen2.5-72B-Instruct | 73.25 | 69.13 | 69.89 | 70.76 |
| LLaMA3.1-70B-Instruct | 71.84 | 69.28 | 68.67 | 69.93 |
| *Same-size Baseline* | | | | |
| Qwen2.5-7B-Instruct | 64.79 | 67.20 | 67.82 | 66.60 |
| LLaMA-3.1-8B-Instruct | 62.42 | 66.77 | 64.28 | 64.49 |
| TowerInstruct-7B-v0.2 | 58.33 | 69.03 | 65.45 | 64.27 |
| MT-R1-Zero-7B-Lex | 63.33 | 66.17 | 64.32 | 64.61 |
| MT-R1-Zero-7B-Sem | 72.00 | 68.41 | 71.51 | 70.64 |
| MT-R1-Zero-7B-Mix | 69.27 | 68.74 | 68.74 | 68.92 |
+ +Table 6: Out-of-distribution performance comparison using the COMETKiwi metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2). + +
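For completeness, scores of the kind reported in Tables 6 and 7 can be computed with the public metric implementations; the snippet below uses sacreBLEU (Post, 2018) for BLEU and Unbabel's COMET package for the reference-free COMETKiwi score (Rei et al., 2022). The single-sentence inputs are illustrative stand-ins for the actual test sets.

```python
# Illustrative metric computation for Tables 6-7 (tiny stand-in data, not the real test sets).
import sacrebleu
from comet import download_model, load_from_checkpoint

srcs = ["加拿大也报告了类似的结果,只是数字较低。"]
hyps = ["Canada also reported similar results, although the numbers were lower."]
refs = ["Canada also reported similar results, just with lower numbers."]  # illustrative reference

# BLEU (Table 7): corpus-level score from sacreBLEU.
bleu = sacrebleu.corpus_bleu(hyps, [refs])
print(f"BLEU = {bleu.score:.2f}")

# COMETKiwi (Table 6): reference-free quality estimation over (source, hypothesis) pairs.
ckpt = download_model("Unbabel/wmt22-cometkiwi-da")
model = load_from_checkpoint(ckpt)
data = [{"src": s, "mt": h} for s, h in zip(srcs, hyps)]
print(model.predict(data, batch_size=8, gpus=0).system_score)
```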
| MODEL | EN-JA | DE-EN (Doc) | DE-ZH | Avg. |
| --- | --- | --- | --- | --- |
| *Strong Baseline* | | | | |
| Qwen2.5-72B-Instruct | 25.02 | 45.54 | 40.83 | 37.13 |
| LLaMA3.1-70B-Instruct | 24.64 | 45.98 | 37.85 | 36.16 |
| *Same-size Baseline* | | | | |
| Qwen2.5-7B-Instruct | 18.91 | 41.17 | 35.25 | 31.78 |
| LLaMA-3.1-8B-Instruct | 16.22 | 40.28 | 31.08 | 29.19 |
| TowerInstruct-7B-v0.2 | 10.52 | 43.40 | 34.74 | 29.55 |
| MT-R1-Zero-7B-Lex | 14.94 | 40.01 | 37.00 | 30.65 |
| MT-R1-Zero-7B-Sem | 14.12 | 33.19 | 22.83 | 23.38 |
| MT-R1-Zero-7B-Mix | 20.27 | 43.17 | 21.41 | 28.28 |
+ +Table 7: Out-of-distribution performance comparison using the BLEU metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2). + +
| | Train EN-ZH | Train ZH-EN | Test EN-ZH | Test ZH-EN | Test EN-JA | Test DE-EN | Test DE-ZH |
| --- | --- | --- | --- | --- | --- | --- | --- |
| # of cases | 6565 | 6565 | 997 | 1976 | 997 | 549 | 1012 |
| Source | WMT 17-20 | WMT 17-20 | WMT 24 | WMT 23 | WMT 24 | WMT 23 | Flores |
+ +Table 8: Data statistics for the training and test sets used in the main experiments (EN $\rightleftharpoons$ ZH). + +
Parallel Data
Train (from EN)Train (to EN)Test (from EN)Test (to EN)Resource
Afrikaans (AF)299434110121012Mid
Danish (DA)299435510121012Mid
Dutch (NL)299440310121012High
German (DE)701588510121012High
Icelandic (IS)499467810121012Low
Norwegian (NO)299436010121012Low
Swedish (SV)299433910121012High
Chinese (ZH)690687410121012High
English (EN)-----
+ +Table 9: Parallel data statistics for languages used in multilingual experiments (Section 6.3), detailing training/test pairs and resource level classification. + +![](images/12359a347db12f6a28ac40ce29f250c9510c6ae5d91478e5f66234d84e04091a.jpg) + +![](images/b02385982b62b62aeec9e46e8b146c1ef6e44c85a8a6b24822ba3480eea684cc.jpg) + +![](images/f05ed6fe4584e687cbc0e0673a93c3498336b87980333857019826514498ecf5.jpg) + +![](images/b5e0d24285ec67b56aa28877c3937735f3d15f64b819d2c5d0e8cf1401382559.jpg) + +![](images/efb78cebd39abe04cdbd765eca2155529d96f3ffc485a6d2f61c9aa3ba7dbc1c.jpg) +Figure 11: Training progression (reference-free XCOMET score) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets (Section 6.3). + +![](images/9c99c3b95f99c45ba1bf2edfb43f2ce1b3e1fd63b6bcccf788a94d00ce921146.jpg) + +![](images/1b62dee80d10c919aac3871669ef0f76618e6c309f1c3e65bd72a56a1dc9961a.jpg) + +![](images/0083342aef6e593edf76e71c3ab654f7e9cf27116a34b0d643937e75add9d402.jpg) + +User:"It's not that bad, right, Kayel?" + +# Assistant: + +When approaching the task of translating the given English text into Japanese, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original while ensuring it reads smoothly and naturally in Japanese. I will pay particular attention to preserving the conversational nature of the exchange and the use of question tags ("right, Kayel?"), as well as the balance between the two characters' statements. + +In this case, I will consider the following aspects: + +1. Maintaining the conversational tone: This involves using appropriate phrasing, word order, and intonation to convey the casual nature of the conversation. + +2. Translating question tags: Japanese question tags are typically formed differently from English, so I will find an equivalent structure that maintains the flow of the conversation. + +3. Preserving the balance between the two characters' statements: I will ensure that the translation reflects the equal weight given to both Kayel's response and the speaker's initial statement in the original text. + +4. Adapting to Japanese sentence structure: Japanese often places the verb at the end of a sentence, so I will adjust the word order accordingly while maintaining the overall meaning and rhythm of the conversation. + + + +“大丈夫!也存在心。久己工儿?” + + + +# User:好看,但有点点小遗憾 + +# Assistant: + +The task at hand involves translating a sentence of Chinese into English while taking into consideration several factors to ensure an effective and appropriate translation. The source text is: “好看,但有点小遗憾。” Given the context in which this sentence is being used, it is clear that the speaker is expressing their positive opinion of something they have just seen or experienced, but also acknowledging a minor flaw or disappointment that slightly + +diminishes their overall satisfaction. With this in mind, it is important to maintain the same level of nuance and complexity in the translation to effectively convey the same level of emotional and intellectual engagement expected in professional and polished English communication. After careful consideration of these factors and the specific requirements of the translation context, I have decided to render the source text as follows: "It looks good, but there's a tiny bit of disappointment." 
This translation not only accurately conveys the same level of appreciation and critique as the source text but also does so in a professional and tactful manner that respects the sensitivity and complexity of the situation at hand. + + + +"It looks good, but there's a tiny bit of disappointment." + +User:北交所融资融券交易的启动是持续推进北交所市场制度建设、完善交易机制、提升二级市场定价功能的重要举措。 + +# Assistant: + +When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese. I will focus on preserving the balance between providing all relevant information and maintaining readability, while also considering the natural flow of the English language. Additionally, I will pay attention to the following aspects: + +1. Structure: I will aim to maintain the overall structure of the Chinese text, including the use of paragraphs and sentence order, to ensure that the English translation remains coherent and easy to follow. + +2. Tone and voice: I will strive to capture the tone and voice of the original text, which appears to be informative and objective, while also considering the appropriate level of formality for the target audience. + +3. Terminology: I will use accurate and up-to-date terminology related to the stock exchange, financing, and trading mechanisms, ensuring that all terms are clear and easily understood by the English-speaking audience. + +4. Cultural and linguistic nuances: I will pay attention to any cultural or linguistic nuances present in the Chinese text and find suitable ways to convey them in English, where possible, without compromising the clarity or readability of the translation. + +5. Number and date formats: I will convert any numbers and date formats from the Chinese system to the corresponding English system, as necessary, to ensure that the translation is both accurate and locally appropriate. + +6. Punctuation and formatting: I will use standard English punctuation and formatting conventions, while taking into account any specific requirements or preferences for the target audience or publication platform. + +By considering these aspects, I aim to produce a high-quality English translation that effectively communicates the content and intent of the original Chinese text. + + + +"The launch of financing and margin trading on the Beijing Stock Exchange is an important step in advancing the development of the exchange's market systems, improving trading mechanisms, and enhancing the secondary market's pricing capabilities." + +# User:加拿大也报告了类似的结果,只是数字较低。 + +# Assistant: + +When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese while ensuring it reads smoothly and naturally in English. I will focus on preserving the balance between the two sentences, maintaining the use of parallel structure where appropriate, and accurately conveying the nuances of the Chinese phrasing, including the use of numbers and the comparative nature of the information presented. + + + +"Canada also reported similar results, although the numbers were lower." + +User: to divide the 412.5 meters long, 12.9 meters wide, with a total weight of 15,000 tons of continuous beams into 96 sections. 
+ +Assistant: To translate the given English sentence into Chinese, I would follow these steps to ensure accuracy, fidelity to the original text, and naturalness in the target language (Chinese): + +1. Understand the context and meaning of the original text: I would first read and re-read the sentence to grasp its full meaning, paying close attention to technical terms, measurements, and the overall structure of the information being conveyed. This ensures a proper translation that maintains the original message. + +2. Identify key elements and vocabulary: I would identify important numbers, measurements, and specific terms that need to be accurately translated, such as "412.5 meters," "12.9 meters," and "15,000 tons." I would also note any technical terms related to engineering or construction, such as "continuous beams" and "sections." + +3. Consider cultural and contextual nuances: While this particular sentence doesn't contain cultural references that would require adaptation, I would generally be aware of how to translate phrases or expressions that might have different connotations or usages in Chinese culture compared to English. + +4. Choose an appropriate translation strategy: For this type of technical information, I would likely use a combination of literal translation and necessary modifications to fit Chinese syntax and readability. This might involve reordering words or phrases to sound more natural in Chinese while still maintaining the original meaning. + +5. Translate while considering the audience: Since this translation is presumably for a Chinese-speaking audience, I would ensure that the translation aligns with how Chinese speakers might express similar ideas, using idiomatic expressions and terminology that would be familiar to them. + +6. Account for register and tone: Depending on the intended use of the translation (e.g., technical manual, news report, etc.), I would adjust the formality and wording to match the appropriate register. For a technical report, the language would be more formal and precise. + +7. Translate numbers and measurements: I would carefully translate the numbers and measurements given (412.5 meters, 12.9 meters, 15,000 tons) to their equivalent in Chinese, ensuring that the units of measurement are accurately reflected in the translation (e.g., using "米" for meters and "吨" for tons). + +8. Ensure coherence and flow in the translation: After translating each part of the sentence, I would review and refine the translation to ensure that it flows naturally in Chinese, making any necessary adjustments to grammar, syntax, and phrasing to create a smooth and coherent translation. + +9. Review and make final adjustments: Finally, I would review the entire translated sentence for accuracy, fluency, and naturalness in Chinese. I would make any necessary adjustments to improve the translation, checking again against the original text to ensure that all information is preserved and that there are no errors or misunderstandings. + +By following these steps, I can provide a translation that is accurate, faithful to the original text, and reads naturally in Chinese. + +将总长412.5米、宽12.9米、总重15000吨的连续梁分成96节。 + +Figure 12: Qualitative examples (I-V) showcasing the diverse thinking patterns generated by MT-R1-Zero models.
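The thinking patterns above are shaped entirely by the reward signal rather than by any supervised reasoning data. As a rough sketch of how a rule-metric mixed reward of the kind described in this work can be wired up, the function below gates a metric score behind a format check; the tag names, the penalty value, and the equal Lex/Sem weighting are illustrative assumptions, not the released implementation.

```python
# Illustrative rule-metric mixed reward: a format rule gates a metric-based score.
import re
from typing import Callable

import sacrebleu

# Assumed output format: <think>...</think><translate>...</translate>
FORMAT = re.compile(r"<think>(.+?)</think>\s*<translate>(.+?)</translate>\s*$", re.DOTALL)

def mixed_reward(completion: str, reference: str,
                 sem_scorer: Callable[[str, str], float]) -> float:
    """Scalar reward for one sampled completion.

    sem_scorer(hypothesis, reference) -> score in [0, 1], e.g. a COMET-style wrapper.
    """
    match = FORMAT.search(completion)
    if match is None:          # rule part: malformed outputs receive a flat penalty
        return -1.0
    hypothesis = match.group(2).strip()
    lex = sacrebleu.sentence_bleu(hypothesis, [reference]).score / 100.0  # lexical term
    sem = sem_scorer(hypothesis, reference)                               # semantic term
    return 0.5 * lex + 0.5 * sem  # "Mix"; the Lex/Sem variants keep only one term

# Toy usage with a stand-in semantic scorer:
print(mixed_reward("<think>plan</think><translate>Hello world</translate>",
                   "Hello world", lambda h, r: 1.0 if h == r else 0.5))
```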
+ +# Ⅲ \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10160/images/0083342aef6e593edf76e71c3ab654f7e9cf27116a34b0d643937e75add9d402.jpg b/data/2025/2504_10xxx/2504.10160/images/0083342aef6e593edf76e71c3ab654f7e9cf27116a34b0d643937e75add9d402.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2d3b4e79623e2e52f5b3a8732b8e0cb8cbde52c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/0083342aef6e593edf76e71c3ab654f7e9cf27116a34b0d643937e75add9d402.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df3f652d8cda7baf98e8fb41e92dac903fe6c1dafe3244c56a95eb69c0db7f31 +size 12088 diff --git a/data/2025/2504_10xxx/2504.10160/images/02ab532ad654b385575826e4ea322358bc806133d469827fce79440c3868a45c.jpg b/data/2025/2504_10xxx/2504.10160/images/02ab532ad654b385575826e4ea322358bc806133d469827fce79440c3868a45c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84671e71a811fe9f969ec914dc7f514bcc46f29e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/02ab532ad654b385575826e4ea322358bc806133d469827fce79440c3868a45c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adaf5962bb3a713a9e3bac8a4d03fe9d53e3c83d82a7ed13fd4f70c7cbb344fe +size 13479 diff --git a/data/2025/2504_10xxx/2504.10160/images/034ce8e5accf4bb3654e49f621aeaacb12a6b6db4cfd17362d73b45299050156.jpg b/data/2025/2504_10xxx/2504.10160/images/034ce8e5accf4bb3654e49f621aeaacb12a6b6db4cfd17362d73b45299050156.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ba974f8b3b6482619371696487695589e729648 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/034ce8e5accf4bb3654e49f621aeaacb12a6b6db4cfd17362d73b45299050156.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:938c4a380877bb4b8d7634576d4e8cb3923e464e902e4ac5ba7e13488994b2c0 +size 20750 diff --git a/data/2025/2504_10xxx/2504.10160/images/0757a8c88b15053894a803861ac938b15aece754b128ca6c399bcfc30873b808.jpg b/data/2025/2504_10xxx/2504.10160/images/0757a8c88b15053894a803861ac938b15aece754b128ca6c399bcfc30873b808.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc7efd345e801852f233624bd96b90c434ca1d69 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/0757a8c88b15053894a803861ac938b15aece754b128ca6c399bcfc30873b808.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc6d5ffb45b691f351e9bf46f9ab12ec95977999203d4d9f8fa803f0b22ab91c +size 11932 diff --git a/data/2025/2504_10xxx/2504.10160/images/0a16468e5c467f1eb3ed748065d44da44f367778442a612e837a87eaa1935712.jpg b/data/2025/2504_10xxx/2504.10160/images/0a16468e5c467f1eb3ed748065d44da44f367778442a612e837a87eaa1935712.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f6085d6746c76f42394d9954c6fa7466a873c93 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/0a16468e5c467f1eb3ed748065d44da44f367778442a612e837a87eaa1935712.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:faccc0beed1328ec3433f8d5bbbb79e3a80b622e602053ff6349cc37cfbc43ff +size 18493 diff --git a/data/2025/2504_10xxx/2504.10160/images/0f9aa36facffeb85c2cb9ac5dc34c7b7c25e508328d27a804f4f1b9adf6f3c9f.jpg b/data/2025/2504_10xxx/2504.10160/images/0f9aa36facffeb85c2cb9ac5dc34c7b7c25e508328d27a804f4f1b9adf6f3c9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40f0cf1a0e2b66d8b7d7963e2e92753a3067b6ef --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10160/images/0f9aa36facffeb85c2cb9ac5dc34c7b7c25e508328d27a804f4f1b9adf6f3c9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e551343a6486be9917dc52832770215a02322c8768a494e54c71d548c6b9a3c +size 36281 diff --git a/data/2025/2504_10xxx/2504.10160/images/12359a347db12f6a28ac40ce29f250c9510c6ae5d91478e5f66234d84e04091a.jpg b/data/2025/2504_10xxx/2504.10160/images/12359a347db12f6a28ac40ce29f250c9510c6ae5d91478e5f66234d84e04091a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29a460722a4f98adf3cc627821be9dea7736892e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/12359a347db12f6a28ac40ce29f250c9510c6ae5d91478e5f66234d84e04091a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4d145f65c9c7a6544baa85a9e9dbf46ed7041dc4ac53f282d84721d21456f5f +size 12260 diff --git a/data/2025/2504_10xxx/2504.10160/images/1b62dee80d10c919aac3871669ef0f76618e6c309f1c3e65bd72a56a1dc9961a.jpg b/data/2025/2504_10xxx/2504.10160/images/1b62dee80d10c919aac3871669ef0f76618e6c309f1c3e65bd72a56a1dc9961a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6a13908f2764a0e9522589746837ccc20cce512 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/1b62dee80d10c919aac3871669ef0f76618e6c309f1c3e65bd72a56a1dc9961a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:484190cef996b2360c2563e24a3b1afa0f181cd0e16602b0e3f4b7eb9a61b019 +size 11789 diff --git a/data/2025/2504_10xxx/2504.10160/images/1b66e6c2a22b2c8383e6ce327aa237f85185f220afb86525d8f7223f066d8e8c.jpg b/data/2025/2504_10xxx/2504.10160/images/1b66e6c2a22b2c8383e6ce327aa237f85185f220afb86525d8f7223f066d8e8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1bc34be80d99efa64c4233306d2ecad084a88998 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/1b66e6c2a22b2c8383e6ce327aa237f85185f220afb86525d8f7223f066d8e8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:039bf892ca5d9672b65edbf38c890b75a9bee2432295764afd02cc5852f8340c +size 31360 diff --git a/data/2025/2504_10xxx/2504.10160/images/22f396b2e96f47bf65e8777892aa22c60a338ae018f764bcd309f20b1afe93a1.jpg b/data/2025/2504_10xxx/2504.10160/images/22f396b2e96f47bf65e8777892aa22c60a338ae018f764bcd309f20b1afe93a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..583aa3afc6ff126a7302ffc04d16624ba5689fa4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/22f396b2e96f47bf65e8777892aa22c60a338ae018f764bcd309f20b1afe93a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db85ff32f950e7fcf5b71d193a21acaaed35c435e5663a43ff1bf49b0fc9aa30 +size 49811 diff --git a/data/2025/2504_10xxx/2504.10160/images/23089aaf58a392db50729b3ce583e9bf14970ec78ddace94058d3f25fd77b580.jpg b/data/2025/2504_10xxx/2504.10160/images/23089aaf58a392db50729b3ce583e9bf14970ec78ddace94058d3f25fd77b580.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39acc7530bfd4bd9ae41d15da88bae56f8d75a21 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/23089aaf58a392db50729b3ce583e9bf14970ec78ddace94058d3f25fd77b580.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf2dbb30e44a6b64d5d2c58e3125d011dd05d26358f8c56c4e9037790034a285 +size 20271 diff --git a/data/2025/2504_10xxx/2504.10160/images/2b806a900a9478d666e9030e40c0b37feece8287689e4230effea1f0e3383eb4.jpg 
b/data/2025/2504_10xxx/2504.10160/images/2b806a900a9478d666e9030e40c0b37feece8287689e4230effea1f0e3383eb4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89542d339faea7e3f711c7d8b65122b582dea1d3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/2b806a900a9478d666e9030e40c0b37feece8287689e4230effea1f0e3383eb4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f9dc9f631b2349f8243aa4168a1abf29012102d362925d9a5d1d9c2e484e5e1 +size 12050 diff --git a/data/2025/2504_10xxx/2504.10160/images/3fd6b1496e8692eacee3bd2a1d2b0277004e03a36cfe84b1be7952860d106bbd.jpg b/data/2025/2504_10xxx/2504.10160/images/3fd6b1496e8692eacee3bd2a1d2b0277004e03a36cfe84b1be7952860d106bbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89951d18ae5e68a3c526f336a0a99de5e501a2eb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/3fd6b1496e8692eacee3bd2a1d2b0277004e03a36cfe84b1be7952860d106bbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b7ac4b91e6189d2c2d2f712b058546c5821758ca6bd15e261311feec2f0d8bd +size 29340 diff --git a/data/2025/2504_10xxx/2504.10160/images/6c56dc9a984522cc0e741f0de6a2bac59cf804d50995fb0411cd7ea83591766e.jpg b/data/2025/2504_10xxx/2504.10160/images/6c56dc9a984522cc0e741f0de6a2bac59cf804d50995fb0411cd7ea83591766e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6241b06d0918897d90a02bb706cf5e4d81ab5e3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/6c56dc9a984522cc0e741f0de6a2bac59cf804d50995fb0411cd7ea83591766e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74603e77df42366aeb68873133d5ea37cd50eed63ef3508c82c7a3fcf21b0186 +size 16177 diff --git a/data/2025/2504_10xxx/2504.10160/images/6f616ff4dcdccce81b29cf948d93e5785e99354f0a92b7b75e4b01182a86d233.jpg b/data/2025/2504_10xxx/2504.10160/images/6f616ff4dcdccce81b29cf948d93e5785e99354f0a92b7b75e4b01182a86d233.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f51ac8a5769f14f901303aa442788ca81c63a61 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/6f616ff4dcdccce81b29cf948d93e5785e99354f0a92b7b75e4b01182a86d233.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a5997dc2d538ee931d04c5f9289e685851991ee259a02e969ab6f6c7f9601e +size 34209 diff --git a/data/2025/2504_10xxx/2504.10160/images/71511929065aefb7be9ca6b4f5a1077fa0d62bedbe3d5c23ded80cd558ab11c9.jpg b/data/2025/2504_10xxx/2504.10160/images/71511929065aefb7be9ca6b4f5a1077fa0d62bedbe3d5c23ded80cd558ab11c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a51861d0a6d42f2adc00157ff27a70eb929b5af2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/71511929065aefb7be9ca6b4f5a1077fa0d62bedbe3d5c23ded80cd558ab11c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5c0a968572097a22b989ffce151c38219924032e22b12d6e3c0a2c8ddee22cc +size 11411 diff --git a/data/2025/2504_10xxx/2504.10160/images/7ac057682468991acec0a5725954084977bdbadd88258f7bb7ab1f266d8066bf.jpg b/data/2025/2504_10xxx/2504.10160/images/7ac057682468991acec0a5725954084977bdbadd88258f7bb7ab1f266d8066bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48659d32f70241e527f54f81aa9b5865fcbbfebd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/7ac057682468991acec0a5725954084977bdbadd88258f7bb7ab1f266d8066bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:93e57df40b390eaf7e6a6554f282eba7cb168614f0a4e2b1ff7d47e549ed109b +size 12335 diff --git a/data/2025/2504_10xxx/2504.10160/images/9c99c3b95f99c45ba1bf2edfb43f2ce1b3e1fd63b6bcccf788a94d00ce921146.jpg b/data/2025/2504_10xxx/2504.10160/images/9c99c3b95f99c45ba1bf2edfb43f2ce1b3e1fd63b6bcccf788a94d00ce921146.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b258826c162295ed4ce8009a9154f216eda1fe0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/9c99c3b95f99c45ba1bf2edfb43f2ce1b3e1fd63b6bcccf788a94d00ce921146.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67f84544f6c6c69131c82b0fc69d931f33c99baaae7b48be138b98e3cfa3b842 +size 12479 diff --git a/data/2025/2504_10xxx/2504.10160/images/9feac2b9c959db1f4447139b0a49da8626efb382b3b1ce588da0a31cc3132242.jpg b/data/2025/2504_10xxx/2504.10160/images/9feac2b9c959db1f4447139b0a49da8626efb382b3b1ce588da0a31cc3132242.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb9a1d643247a8c1a778a4407aee27feedea65a9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/9feac2b9c959db1f4447139b0a49da8626efb382b3b1ce588da0a31cc3132242.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:767e27130be7736dbd64ad79310983c3347a452fb1111da66cdd0f50921412fd +size 73648 diff --git a/data/2025/2504_10xxx/2504.10160/images/aeb6fe232c20e93a9294e931828401ff289284f573bd6eaeb2a718bc2b332396.jpg b/data/2025/2504_10xxx/2504.10160/images/aeb6fe232c20e93a9294e931828401ff289284f573bd6eaeb2a718bc2b332396.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4042d88fd27458f0bfc71899b5ccea72ea9c9d5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/aeb6fe232c20e93a9294e931828401ff289284f573bd6eaeb2a718bc2b332396.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a397dacb4d1c857d8c8bb8f36c8767f98ca82e17f036b502908d9afc8c9690ea +size 161824 diff --git a/data/2025/2504_10xxx/2504.10160/images/af5d7b3938351799e5ac948de228b3fbb521d21706b10eec2d5ab3231e32afd1.jpg b/data/2025/2504_10xxx/2504.10160/images/af5d7b3938351799e5ac948de228b3fbb521d21706b10eec2d5ab3231e32afd1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b445e75a4151cdc8cc5cfc9a9475f01e3217892 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/af5d7b3938351799e5ac948de228b3fbb521d21706b10eec2d5ab3231e32afd1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7abad9215524ea6aa5cbb718b688db7acd047310496b9d737bd40b2e150cdae6 +size 19987 diff --git a/data/2025/2504_10xxx/2504.10160/images/b02385982b62b62aeec9e46e8b146c1ef6e44c85a8a6b24822ba3480eea684cc.jpg b/data/2025/2504_10xxx/2504.10160/images/b02385982b62b62aeec9e46e8b146c1ef6e44c85a8a6b24822ba3480eea684cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..270d494719689ee5cb9972cd3d09b8416d3da9e7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/b02385982b62b62aeec9e46e8b146c1ef6e44c85a8a6b24822ba3480eea684cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e7d40d3ca03c57a940087843b401212ff5168c54d112e09a5f71e528ad629ac +size 11948 diff --git a/data/2025/2504_10xxx/2504.10160/images/b0f8de023d87072cb76332d6598f440181ff0dd4eb685e2fd493c511dac131f7.jpg b/data/2025/2504_10xxx/2504.10160/images/b0f8de023d87072cb76332d6598f440181ff0dd4eb685e2fd493c511dac131f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b53bab52ffafd9643083e9b47611da05e26d139 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10160/images/b0f8de023d87072cb76332d6598f440181ff0dd4eb685e2fd493c511dac131f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f658a7baa5652f17d78b3f14a036ba4a0290dea4f98644b7f540fe684906075 +size 16942 diff --git a/data/2025/2504_10xxx/2504.10160/images/b5e0d24285ec67b56aa28877c3937735f3d15f64b819d2c5d0e8cf1401382559.jpg b/data/2025/2504_10xxx/2504.10160/images/b5e0d24285ec67b56aa28877c3937735f3d15f64b819d2c5d0e8cf1401382559.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa986a3d0b3c348c0c23998acf8d9854c0620350 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/b5e0d24285ec67b56aa28877c3937735f3d15f64b819d2c5d0e8cf1401382559.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2b76f42f17daacd4011ad10c6379674669948c6e09b49b4c176a41aec403403 +size 12287 diff --git a/data/2025/2504_10xxx/2504.10160/images/ba5a6d095eeb40acfd1aa4e6f18c36e661f2b8ecf8e4bc67ac76bd24ea907ff4.jpg b/data/2025/2504_10xxx/2504.10160/images/ba5a6d095eeb40acfd1aa4e6f18c36e661f2b8ecf8e4bc67ac76bd24ea907ff4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a41f1263465b169dee6cea39fe3988f2660b13a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/ba5a6d095eeb40acfd1aa4e6f18c36e661f2b8ecf8e4bc67ac76bd24ea907ff4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9899dd4cfb8843832700f110339e5b1b4da8688d6fdac7ba17ec20f50c3c7dac +size 212055 diff --git a/data/2025/2504_10xxx/2504.10160/images/c14738ed2a90fea60765a305cf9e025702d36c8cc35a56742176cf90905b8840.jpg b/data/2025/2504_10xxx/2504.10160/images/c14738ed2a90fea60765a305cf9e025702d36c8cc35a56742176cf90905b8840.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eacebbc2e7c29e9f167e7c598e6a0ea6937bc194 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/c14738ed2a90fea60765a305cf9e025702d36c8cc35a56742176cf90905b8840.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb78ff346b38a428dccad158c582c853258fd9e1628a1baec945cf2259cab10d +size 41160 diff --git a/data/2025/2504_10xxx/2504.10160/images/c383c95349861f941ebf06b185adff0fab2153b9e73c7a737770d34165272b50.jpg b/data/2025/2504_10xxx/2504.10160/images/c383c95349861f941ebf06b185adff0fab2153b9e73c7a737770d34165272b50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa0b63c6a2fee7196da5d1c67387fcb9e8a22e07 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/c383c95349861f941ebf06b185adff0fab2153b9e73c7a737770d34165272b50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:885703d21d87c0369dabb3572118d53fac57ce553483975dcf25ec1ceac15713 +size 23431 diff --git a/data/2025/2504_10xxx/2504.10160/images/c68080def505d3b55351519d62c72fbac53f65b34aaeb4005232f5ce0d4e46b6.jpg b/data/2025/2504_10xxx/2504.10160/images/c68080def505d3b55351519d62c72fbac53f65b34aaeb4005232f5ce0d4e46b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbe6e6d94856a870efe6824a5aac7475c4373980 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/c68080def505d3b55351519d62c72fbac53f65b34aaeb4005232f5ce0d4e46b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:430a8446e4cb3c85a25a55a672b33749a9950943ef06a91599b476757eda36aa +size 22935 diff --git a/data/2025/2504_10xxx/2504.10160/images/cc80f22abd648b0263c9273ccf57a87fad8cc6b763268813c8dafc1c7cc14e83.jpg 
b/data/2025/2504_10xxx/2504.10160/images/cc80f22abd648b0263c9273ccf57a87fad8cc6b763268813c8dafc1c7cc14e83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e46072429ea5ca6d7be6e8053518c927a1210dd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/cc80f22abd648b0263c9273ccf57a87fad8cc6b763268813c8dafc1c7cc14e83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfa846642d0b7ce5935186d05eb33af5cbc6298795c9aa4a3425e4d5e781f977 +size 11832 diff --git a/data/2025/2504_10xxx/2504.10160/images/ccde1d795737b77b035a78d78d9a3d8caaca032767489bbe456ecb086b0bf4cd.jpg b/data/2025/2504_10xxx/2504.10160/images/ccde1d795737b77b035a78d78d9a3d8caaca032767489bbe456ecb086b0bf4cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5392db4bd9d840d39ebc419e156070769e2a82ce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/ccde1d795737b77b035a78d78d9a3d8caaca032767489bbe456ecb086b0bf4cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3afd734ee47486208ade2012da28c62e14e67ac6a3214467c955e486c8ecb5e8 +size 22930 diff --git a/data/2025/2504_10xxx/2504.10160/images/ce703c30b6d4f759ae322599c25adbf3b556fc22b8114be045e5c526169a0056.jpg b/data/2025/2504_10xxx/2504.10160/images/ce703c30b6d4f759ae322599c25adbf3b556fc22b8114be045e5c526169a0056.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e539a3141116860d3303b9e9d999e8572d753e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/ce703c30b6d4f759ae322599c25adbf3b556fc22b8114be045e5c526169a0056.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f662a2792f8b2d4823fe04d585d746bbc9d9d2c81e05983795ff0540c00dfdfc +size 8755 diff --git a/data/2025/2504_10xxx/2504.10160/images/d091b63c34fab3acdd1a2db73581403ba0aaffa3a30b26bc7b4f08eb9981e3cc.jpg b/data/2025/2504_10xxx/2504.10160/images/d091b63c34fab3acdd1a2db73581403ba0aaffa3a30b26bc7b4f08eb9981e3cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d49d42a007a2ae44131b74a55362c805a018cb63 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/d091b63c34fab3acdd1a2db73581403ba0aaffa3a30b26bc7b4f08eb9981e3cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e74acbf31b77ba1a1e74f6c42214d0fa9215382b2e93f56eb8169bfcc98b49bc +size 26989 diff --git a/data/2025/2504_10xxx/2504.10160/images/d80cadc9e8ea52aa5536f11d8ae0db66b4eea2ed0b6e2458c56434863a1b4d6f.jpg b/data/2025/2504_10xxx/2504.10160/images/d80cadc9e8ea52aa5536f11d8ae0db66b4eea2ed0b6e2458c56434863a1b4d6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce7e0b3a066dfecee3412ece0c7ae6e16593789c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/d80cadc9e8ea52aa5536f11d8ae0db66b4eea2ed0b6e2458c56434863a1b4d6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98feb75f9b48e51717aaf74e80d3910820d4b4608d9857bcdde5cf9f514365b5 +size 11492 diff --git a/data/2025/2504_10xxx/2504.10160/images/e06054696095bd5b5b33af33f93854ce771daa3bb99d030ff2d6e11f1ddb8da5.jpg b/data/2025/2504_10xxx/2504.10160/images/e06054696095bd5b5b33af33f93854ce771daa3bb99d030ff2d6e11f1ddb8da5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..555bfec7017394b83a9980fc7dedbf2c0eb6711d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/e06054696095bd5b5b33af33f93854ce771daa3bb99d030ff2d6e11f1ddb8da5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:69334c3ff7829cb5a937a343a652b635504cfcc74cea2efb8564605207970ff1 +size 41936 diff --git a/data/2025/2504_10xxx/2504.10160/images/e41cbd9c8447d649ac024c04363a29dd3ca5bfeb740d405db90050c2446bbc4f.jpg b/data/2025/2504_10xxx/2504.10160/images/e41cbd9c8447d649ac024c04363a29dd3ca5bfeb740d405db90050c2446bbc4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..062d4aec874b70ce55fce422ef7c899a3bdd93d8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/e41cbd9c8447d649ac024c04363a29dd3ca5bfeb740d405db90050c2446bbc4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af0da20214007e481f993ec707f64351746adf3d6c75f5587e282451d8e71fab +size 12917 diff --git a/data/2025/2504_10xxx/2504.10160/images/e51b8bbce116110722fb78a02080c7e65321d8200b84ae803a6baa624079eac9.jpg b/data/2025/2504_10xxx/2504.10160/images/e51b8bbce116110722fb78a02080c7e65321d8200b84ae803a6baa624079eac9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc91e14f626e03989a63e605a1089ec29876ed0e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/e51b8bbce116110722fb78a02080c7e65321d8200b84ae803a6baa624079eac9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2aa2b5bab459ae8d0d665ae68cf0ba178a56c9922de96ebeedb291e4f6e9a70 +size 40841 diff --git a/data/2025/2504_10xxx/2504.10160/images/e55e14eebe1ca2bdc590ae8036f91c559c49fa5d52dfe0db21c613c1ce9d1660.jpg b/data/2025/2504_10xxx/2504.10160/images/e55e14eebe1ca2bdc590ae8036f91c559c49fa5d52dfe0db21c613c1ce9d1660.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c92682087af99be68ab055f9131293ec54c9dd2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/e55e14eebe1ca2bdc590ae8036f91c559c49fa5d52dfe0db21c613c1ce9d1660.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d23d3ab2e0acb4c8d54f7197a60f67a9023f4bbde0de7c747e1c625501f04854 +size 23033 diff --git a/data/2025/2504_10xxx/2504.10160/images/e69942eb747b7718124ac8b6d797571fd31e5f654d60911f1102bcd929edc478.jpg b/data/2025/2504_10xxx/2504.10160/images/e69942eb747b7718124ac8b6d797571fd31e5f654d60911f1102bcd929edc478.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96dc7cdf7a48bbd7b238a46406caad7848b8bb8e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/e69942eb747b7718124ac8b6d797571fd31e5f654d60911f1102bcd929edc478.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f53b81c7a7a8fc5741d6360db89a62de10ed0e0212c75f5cd4d4a0a43e2a66c9 +size 8213 diff --git a/data/2025/2504_10xxx/2504.10160/images/e93f56af0eca6a1c191d82bec561e582d8affcd116cfbe1b06aeac5792b12def.jpg b/data/2025/2504_10xxx/2504.10160/images/e93f56af0eca6a1c191d82bec561e582d8affcd116cfbe1b06aeac5792b12def.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fdfad962d588ce4efb4048016002f46bbb98154 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/e93f56af0eca6a1c191d82bec561e582d8affcd116cfbe1b06aeac5792b12def.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49e0e4e9ea9b54fe4f26c613915ba13849660a35ed4578104383cb0affc7e23f +size 11749 diff --git a/data/2025/2504_10xxx/2504.10160/images/efb78cebd39abe04cdbd765eca2155529d96f3ffc485a6d2f61c9aa3ba7dbc1c.jpg b/data/2025/2504_10xxx/2504.10160/images/efb78cebd39abe04cdbd765eca2155529d96f3ffc485a6d2f61c9aa3ba7dbc1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f7741854eb56ef06a9d8b9501cce19713de8646 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10160/images/efb78cebd39abe04cdbd765eca2155529d96f3ffc485a6d2f61c9aa3ba7dbc1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e16e5f6cecdd9ca3ba422a0f4ff19a17a73913837863ab8b4f2323303b5ee854 +size 11402 diff --git a/data/2025/2504_10xxx/2504.10160/images/f05ed6fe4584e687cbc0e0673a93c3498336b87980333857019826514498ecf5.jpg b/data/2025/2504_10xxx/2504.10160/images/f05ed6fe4584e687cbc0e0673a93c3498336b87980333857019826514498ecf5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4607937b9220a7f1abb4c269ffa1026e39a125e0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/images/f05ed6fe4584e687cbc0e0673a93c3498336b87980333857019826514498ecf5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d54dc34af1a9d02ccd43954c4e47f905b63215d125bdd9a14d923ef92aa513e7 +size 10688 diff --git a/data/2025/2504_10xxx/2504.10160/layout.json b/data/2025/2504_10xxx/2504.10160/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c9f8910e4750f12e21ee8f82167f233449888037 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10160/layout.json @@ -0,0 +1,14124 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 75, + 489, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 75, + 489, + 110 + ], + "spans": [ + { + "bbox": [ + 105, + 75, + 489, + 110 + ], + "type": "text", + "content": "MT-R1-Zero: Advancing LLM-based Machine Translation via R1-Zero-like Reinforcement Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "spans": [ + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "text", + "content": "Zhaopeng Feng" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "text", + "content": " Shaosheng Cao" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "text", + "content": " Jiahan Ren" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "text", + "content": " Jiayuan Su" + }, + { + "bbox": [ + 127, + 123, + 465, + 138 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "spans": [ + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "text", + "content": "Ruizhe Chen" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "text", + "content": " Yan Zhang" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "text", + "content": " Zhe Xu" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "text", + "content": " Yao Hu" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], 
+ "type": "text", + "content": " Jian Wu" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "text", + "content": " Zuozhu Liu" + }, + { + "bbox": [ + 92, + 138, + 502, + 152 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 193, + 152, + 399, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 152, + 399, + 166 + ], + "spans": [ + { + "bbox": [ + 193, + 152, + 399, + 166 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 193, + 152, + 399, + 166 + ], + "type": "text", + "content": "Zhejiang University " + }, + { + "bbox": [ + 193, + 152, + 399, + 166 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 193, + 152, + 399, + 166 + ], + "type": "text", + "content": "Xiaohongshu Inc." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 180, + 167, + 414, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 167, + 414, + 179 + ], + "spans": [ + { + "bbox": [ + 180, + 167, + 414, + 179 + ], + "type": "text", + "content": "{zhaopeng.23,zuozhuliu}@intl.zju.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 168, + 180, + 427, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 180, + 427, + 193 + ], + "spans": [ + { + "bbox": [ + 168, + 180, + 427, + 193 + ], + "type": "text", + "content": "{caoshaosheng,qiete,xiahou}@xiaohongshu.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 85, + 243, + 274, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 243, + 274, + 673 + ], + "spans": [ + { + "bbox": [ + 85, + 243, + 274, + 673 + ], + "type": "text", + "content": "Large-scale reinforcement learning (RL) methods have proven highly effective in enhancing the reasoning abilities of large language models (LLMs), particularly for tasks with verifiable solutions such as mathematics and coding. However, applying this idea to machine translation (MT), where outputs are flexibly formatted and difficult to automatically evaluate with explicit rules, remains underexplored. In this work, we introduce MT-R1-Zero, the first open-source adaptation of the R1-Zero RL framework for MT without supervised fine-tuning or cold-start. We propose a rule-metric mixed reward mechanism to guide LLMs towards improved translation quality via emergent reasoning. On the WMT 24 English-Chinese benchmark, our MT-R1-Zero3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points. Meanwhile, our MT-R1-Zero7B-Mix attains a high average score of 62.25 across all metrics, placing it on par with advanced proprietary models such as GPT-4o and Claude-3.5-Sonnet, while the MT-R1-Zero7B-Sem variant achieves state-of-the-art scores on semantic metrics. Moreover, our work exhibits strong generalization capabilities on out-of-distribution MT tasks, robustly supporting multilingual and low-resource settings. 
Extensive analysis of model behavior across different initializations and reward metrics offers pioneering insight into the critical role of reward design, LLM adaptability, training dynamics, and emergent reasoning patterns within the R1-Zero paradigm for MT. Our code is available at https://github.com/fzp0424/MT-R1-Zero." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 693, + 154, + 706 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 693, + 154, + 706 + ], + "spans": [ + { + "bbox": [ + 68, + 693, + 154, + 706 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 715, + 291, + 755 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 715, + 291, + 755 + ], + "spans": [ + { + "bbox": [ + 67, + 715, + 291, + 755 + ], + "type": "text", + "content": "Large-scale Reinforcement Learning (RL) has empowered Large Language Models (LLMs) with strong reasoning capabilities (OpenAI, 2024; Team," + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 308, + 218, + 520, + 389 + ], + "blocks": [ + { + "bbox": [ + 308, + 218, + 520, + 389 + ], + "lines": [ + { + "bbox": [ + 308, + 218, + 520, + 389 + ], + "spans": [ + { + "bbox": [ + 308, + 218, + 520, + 389 + ], + "type": "image", + "image_path": "6f616ff4dcdccce81b29cf948d93e5785e99354f0a92b7b75e4b01182a86d233.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 398, + 527, + 447 + ], + "lines": [ + { + "bbox": [ + 302, + 398, + 527, + 447 + ], + "spans": [ + { + "bbox": [ + 302, + 398, + 527, + 447 + ], + "type": "text", + "content": "Figure 1: Performance comparison of contemporary LLM-based translation systems on the WMT 24 EN-ZH test set, plotted by average score across BLEU, COMETKiwi, and XCOMET versus model size." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 462, + 526, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 462, + 526, + 665 + ], + "spans": [ + { + "bbox": [ + 302, + 462, + 526, + 665 + ], + "type": "text", + "content": "2025a,b), demonstrating significant success in tasks such as mathematical reasoning or coding in which answers can be clearly verified. In particular, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) introduced a pure rule-based RL approach that directly fosters emergent reasoning ability without requirements on structured Chain-of-Thought (CoT) data (Wei et al., 2022; Cui et al., 2025) or sophisticated techniques such as Monte Carlo Tree Search (MCTS) (Silver et al., 2016; Luo et al., 2024; Qi et al., 2024; Guan et al., 2025). However, the applicability of these methods to machine translation (MT) remains challenging and underexplored, as MT outputs are flexibly generated and hard to evaluate automatically with explicit rules." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 667, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 667, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 667, + 526, + 775 + ], + "type": "text", + "content": "Recent work has launched attempts to empower LLMs for MT with reasoning capabilities (Chen et al., 2025; Liu et al., 2025). 
Early studies investigate explicit reasoning methods for improved translation, such as finetuning with CoT (Wang et al., 2024a) or MCTS (Zhao et al., 2024), where advanced multi-step pipelines with self-correction or long-thought agentic mechanisms are further ex" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 259, + 36, + 609 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 259, + 36, + 609 + ], + "spans": [ + { + "bbox": [ + 13, + 259, + 36, + 609 + ], + "type": "text", + "content": "arXiv:2504.10160v1 [cs.CL] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 762, + 176, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 762, + 176, + 775 + ], + "spans": [ + { + "bbox": [ + 84, + 762, + 176, + 775 + ], + "type": "text", + "content": "† Corresponding author." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 293, + 793, + 299, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 299, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 299, + 803 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 248 + ], + "type": "text", + "content": "plored (Feng et al., 2024b; Wang et al., 2024b,a). Another line of work leverages RL to empower LLMs for MT through process reward models or supervised finetuning (SFT) with manually annotated CoT data (Feng et al., 2025; He et al., 2025). However, these methods often depend on manually designed or synthetically generated structured CoT data, rely on complex search algorithms, or require explicit multi-stage prompting, leaving the potential of pure RL-based approaches largely unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art (SoTA) open-source or proprietary models." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 248, + 292, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 248, + 292, + 462 + ], + "spans": [ + { + "bbox": [ + 69, + 248, + 292, + 462 + ], + "type": "text", + "content": "Developing pure RL methods to directly enhance the reasoning ability of LLMs for better translation requires answering three key questions: 1) Feasibility: How to design R1-Zero-like RL pipelines with effective reward signals to directly solve MT tasks without binary rule-based rewards; 2) Reasoning capability: Could pure RL training cultivate emergent reasoning abilities and induce models to generate explicit thinking patterns for MT, such as multi-step CoT or verification/reflection; 3) Generalizability: Could the training paradigm generalize across different models (e.g., pre-trained base models, instruction-tuned models, or models pretrained on translation data) or diverse downstream settings (e.g., out-of-distribution, multilingual or low-resource scenarios)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 464, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 464, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 69, + 464, + 292, + 775 + ], + "type": "text", + "content": "In this work, we introduce MT-R1-Zero, the first open-source implementation that extends the RL-Zero-like RL training paradigm to MT. We propose a rule-metric mixed reward mechanism that adapts the original rule-based reward concept to effectively guide training in MT scenarios. We explore different rewards optimizing over lexical (Lex), semantic (Sem), and Lex-Sem mixed (Mix) objectives to guide LLMs towards improved translation quality via emergent reasoning. Our experiments demonstrate the efficacy of this approach: as RL training progresses, our MT-R1-Zero-3B-Mix achieves competitive performance, surpassing TowerInstruct-7B-v0.2 by an average of 1.26 points across all metrics (BLEU, COMETKiwi, XCOMET) on the WMT 24 English-Chinese (EN-ZH) benchmark. Meanwhile, our MT-R1-Zero-7B-Mix surpasses LLaMA-3.1-70B by an average of 1.24 points and Qwen2.5-72B by 0.48 points, even on par with top proprietary models such as GPT-4o and Claude-3.5-Sonnet. The MT-R1-Zero further demonstrate promising generalizability across multilingual and low-resource settings." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 302, + 71, + 527, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 274 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 274 + ], + "type": "text", + "content": "Extensive experiments further provide key findings and insight into the adaptation of R1-Zero paradigm to MT. First, we empirically demonstrate that the choice of metric reward plays a pivotal role in steering RL optimization and translation style (semantic or lexical) (Finding 1). Further analysis reveals that MT-R1-Zero induces diverse emergent reasoning patterns, including dynamic language-of-thought transition during translation (Findings 2 and 3). We also identify distinct RL adaptability of different base LLMs (Finding 4). Ablation studies suggest that the pure RL process alone can lead to substantial translation improvements, independent of thinking morbidity (Section 6). Our core contributions are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 303, + 283, + 527, + 490 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 303, + 283, + 527, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 283, + 527, + 338 + ], + "spans": [ + { + "bbox": [ + 303, + 283, + 527, + 338 + ], + "type": "text", + "content": "- We present the first open-source implementation of the DeepSeek-R1-Zero paradigm for MT, achieving superior performance across indomain, OOD and generalization MT tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 303, + 345, + 527, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 345, + 527, + 399 + ], + "spans": [ + { + "bbox": [ + 303, + 345, + 527, + 399 + ], + "type": "text", + "content": "- Our analysis reveals key findings and recipes for effective R1-Zero adaptation to MT, including reward metric selection, emergent reasoning patterns, training dynamics and LLM adaptability." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 303, + 407, + 527, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 407, + 527, + 490 + ], + "spans": [ + { + "bbox": [ + 303, + 407, + 527, + 490 + ], + "type": "text", + "content": "- Extensive experiments and ablations show that pure RL serves as the primary driver of MT improvements, with minimal dependence on forced reasoning or output length, highlighting the significant potential of RL for diverse translation applications and broader language tasks." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 303, + 497, + 396, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 497, + 396, + 510 + ], + "spans": [ + { + "bbox": [ + 303, + 497, + 396, + 510 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 518, + 527, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 518, + 527, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 518, + 527, + 776 + ], + "type": "text", + "content": "LLM Reasoning with Post-training. Recent research indicates that scaling test-time computation can significantly enhance the ability of LLMs to tackle complex reasoning tasks (OpenAI, 2024; Zeng et al., 2024; Xiang et al., 2025). Many approaches rely on sophisticated techniques such as step-level process reward models (PRMs) that provide granular feedback (Lightman et al., 2024; Yuan et al., 2024; Snell et al., 2024) or MCTS to explore potential reasoning paths (Feng et al., 2023; Qi et al., 2024; Guan et al., 2025). A recent alternative, DeepSeek-R1-Zero (DeepSeek-AI et al., 2025), demonstrated that large-scale pure RL, guided only by formatting rules and correctness of final predictions (rule-based reward), can motivate LLMs to develop self-emergent reasoning processes for complex reasoning tasks. Subsequent work (Hu et al., 2025; Face, 2025) successfully replicated this training paradigm in open-source" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 206 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 206 + ], + "type": "text", + "content": "models, focusing on mathematical domains. Xie et al. (2025) further demonstrated the effectiveness and generalization capabilities of the R1-Zero paradigm using logic reasoning game problems, while Huang et al. (2025) explored its potential for vision reasoning. Despite its potential, the application of the R1-Zero RL paradigm to complex generation tasks like MT, in which the accuracy/quality of outputs is not rule-based and difficult to validate automatically, remains an open question." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 208, + 291, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 208, + 291, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 208, + 291, + 518 + ], + "type": "text", + "content": "LLM Reasoning for MT. 
Leveraging reasoning to improve MT has garnered increasing attention, as systematically explored in Chen et al. (2025) and Liu et al. (2025). Previous work have designed multi-step processes for MT, e.g., Feng et al. (2024b) introduced an API-based self-correcting framework, and Wang et al. (2024b) employed multi-task training followed by a multistage inference phase. Wang et al. (2024a) integrated a similar procedure into inference-time CoT, using a multi-agent mechanism to synthesize long CoT prompts for English-Chinese literary translation. Efforts have also focused on reward modeling for MT reasoning. Feng et al. (2025) constructed implicit process reward models for translation and explored their effectiveness when combined with test-time search. Recent study further evaluated explicit reasoning for MT using CoT fine-tuning and MCTS to expand test-time computation (Zhao et al., 2024). He et al. (2025) demonstrated that models can acquire reasoning-based translation capabilities through multi-stage training with manually constructed CoT templates." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 520, + 291, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 520, + 291, + 642 + ], + "spans": [ + { + "bbox": [ + 67, + 520, + 291, + 642 + ], + "type": "text", + "content": "However, these existing methods often necessitate manually designed or synthetically generated structured CoT data, rely on complex search algorithms (MCTS), or require explicit multi-stage prompting (self-correction). The effectiveness of large-scale pure RL training paradigms such as R1-Zero remains unexplored. Furthermore, the performance reported in these studies often lags behind state-of-the-art open-source or proprietary models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 655, + 130, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 655, + 130, + 668 + ], + "spans": [ + { + "bbox": [ + 67, + 655, + 130, + 668 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "content": "In this section, we present our method that trains a translation model with pure RL using a hybrid reward model. Unlike tasks with fixed correct answers, translation allows for multiple valid outputs, making the evaluation more complicated. In this work, we introduce a rule-metric mixed reward that integrates reasoning format checking with multi-" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 126 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 126 + ], + "type": "text", + "content": "ple translation quality assessment metrics, which is used within the Group Relative Policy Optimization (GRPO) (Shao et al., 2024) algorithm to ensure stable and efficient RL training." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 135, + 460, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 135, + 460, + 147 + ], + "spans": [ + { + "bbox": [ + 302, + 135, + 460, + 147 + ], + "type": "text", + "content": "3.1 Rule-Metric Mixed Reward" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 153, + 526, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 153, + 526, + 423 + ], + "spans": [ + { + "bbox": [ + 302, + 153, + 526, + 423 + ], + "type": "text", + "content": "In RL, the reward is the main signal that drives model training. DeepSeek-R1-Zero (DeepSeek-AI et al., 2025) employs simple rule-based rewards that check whether the final answer is correct and whether the response follows a specific format. This works well for tasks with fixed format correct answers such as math or coding. However, there is often no single \"correct\" output for MT, impeding the design of rule-based rewards. Fortunately, the MT community has developed many evaluation metrics to measure translation quality. Recent advancements in automated MT evaluation metrics have shown promise in aligning automated assessments with human translation quality judgments (Freitag et al., 2022, 2023). Thus, we design a rule-metric mixed reward, which consists of two parts: a Format Reward that checks output structure, and a Metric Reward that evaluates translation quality. We use a structured prompt template similar to that in DeepSeek-R1-Zero:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 319, + 433, + 443, + 446 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 433, + 443, + 446 + ], + "spans": [ + { + "bbox": [ + 319, + 433, + 443, + 446 + ], + "type": "text", + "content": "Template for MT-R1-Zero" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 455, + 511, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 455, + 511, + 645 + ], + "spans": [ + { + "bbox": [ + 317, + 455, + 511, + 645 + ], + "type": "text", + "content": "A conversation between User and Assistant. The User asks for a translation from {src_language} to {tgt_language}, and the Assistant solves it. The Assistant first thinks about the reasoning process in the mind and then provides the user with the final translation. The reasoning process and final translation are enclosed within and tags, respectively, i.e., reasoning process here final translation here . \nUser:{src_text} \nAssistant:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 659, + 526, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 659, + 526, + 699 + ], + "spans": [ + { + "bbox": [ + 302, + 659, + 526, + 699 + ], + "type": "text", + "content": "Here, src_language and tgt_language indicate the source and target languages, and src_text denotes the source text requiring translation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 708, + 527, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 527, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 527, + 774 + ], + "type": "text", + "content": "Format Reward: We use regular expression extraction to enforce a structured response format. 
The model is required to place its reasoning process within tags and provide the final translation inside " + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "type": "text", + "content": "tags. The format reward score " + }, + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "type": "inline_equation", + "content": "(S_{\\text{format}})" + }, + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "type": "text", + "content": " is computed as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 90, + 117, + 267, + 153 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 117, + 267, + 153 + ], + "spans": [ + { + "bbox": [ + 90, + 117, + 267, + 153 + ], + "type": "interline_equation", + "content": "S _ {f o r m a t} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f f o r m a t i s c o r r e c t} \\\\ - 1, & \\text {i f f o r m a t i s i n c o r r e c t} \\end{array} \\right.", + "image_path": "e69942eb747b7718124ac8b6d797571fd31e5f654d60911f1102bcd929edc478.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "spans": [ + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "type": "text", + "content": "Metric Reward: This reward evaluates the quality of model's translation, but only if the response format is correct. We use automatic evaluation metrics to calculate a translation quality score " + }, + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "type": "inline_equation", + "content": "S_{\\text{metric}}" + }, + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "type": "text", + "content": ". We explore three approaches to compute " + }, + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "type": "inline_equation", + "content": "S_{\\text{metric}}" + }, + { + "bbox": [ + 67, + 160, + 291, + 229 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 237, + 291, + 592 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 69, + 237, + 291, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 237, + 291, + 344 + ], + "spans": [ + { + "bbox": [ + 69, + 237, + 291, + 344 + ], + "type": "text", + "content": "1. N-gram Lexical Matching Reward (RewardLex): Metrics such as BLEU (Papineni et al., 2002) orchrF (Popovic, 2015) evaluate translation quality by measuring the difference (primarily lexical overlap) between the translation and the human-written reference. In our experiments, we employ the BLEU score calculated via the sacrebleu" + }, + { + "bbox": [ + 69, + 237, + 291, + 344 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 69, + 237, + 291, + 344 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 354, + 291, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 291, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 291, + 502 + ], + "type": "text", + "content": "2. Semantic and Contextual Reward (Reward-Sem): Learning-based metrics like COMET (Rei et al., 2020) and COMETKiwi (Rei et al., 2022) are trained on human judgments (e.g., MQM quality assessments (Freitag et al., 2021)). These metrics can recognize good translations even if the wording differs from the reference, as long as the meaning is preserved. We use the COMETKiwi- " + }, + { + "bbox": [ + 67, + 354, + 291, + 502 + ], + "type": "inline_equation", + "content": "23^{2}" + }, + { + "bbox": [ + 67, + 354, + 291, + 502 + ], + "type": "text", + "content": ", which was used in the WMT 24 (Kocmi et al., 2024) and only needs the source sentence and the model's translation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 512, + 290, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 512, + 290, + 592 + ], + "spans": [ + { + "bbox": [ + 68, + 512, + 290, + 592 + ], + "type": "text", + "content": "3. Lexical and Semantic Mixed Reward (Reward-Mix): To capture both lexical fidelity and semantic adequacy, we use a hybrid reward (Reward-Mix) that adds together Lexical Matching Reward (Reward-Lex) and Semantic and Contextual Reward (Reward-Sem)." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 602, + 290, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 602, + 290, + 629 + ], + "spans": [ + { + "bbox": [ + 67, + 602, + 290, + 629 + ], + "type": "text", + "content": "Accordingly, the computation of " + }, + { + "bbox": [ + 67, + 602, + 290, + 629 + ], + "type": "inline_equation", + "content": "S_{\\text{metric}}" + }, + { + "bbox": [ + 67, + 602, + 290, + 629 + ], + "type": "text", + "content": " depends on the selected reward configuration:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 638, + 299, + 675 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 638, + 299, + 675 + ], + "spans": [ + { + "bbox": [ + 68, + 638, + 299, + 675 + ], + "type": "interline_equation", + "content": "S _ {m e t r i c} = \\left\\{ \\begin{array}{l l} \\mathrm {B} (\\text {t r a n s}, \\text {r e f}), & \\text {i f R e w a r d - L e x} \\\\ \\mathrm {C K} (\\text {s r c}, \\text {t r a n s}) & \\text {i f R e w a r d - S e m} \\\\ \\mathrm {B} (\\text {t r a n s}, \\text {r e f}) + \\mathrm {C K} (\\text {s r c}, \\text {t r a n s}), & \\text {i f R e w a r d - M i x} \\end{array} \\right.", + "image_path": "e41cbd9c8447d649ac024c04363a29dd3ca5bfeb740d405db90050c2446bbc4f.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 683, + 291, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 683, + 291, + 736 + ], + "spans": [ + { + "bbox": [ + 67, + 683, + 291, + 736 + ], + "type": "text", + "content": "where B denotes normalized BLEU score, CK denotes the COMETKiwi score, trans is the generated translation, ref is the reference translation, and src is the source text." 
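To make the three reward configurations above concrete, here is a minimal sketch of S_metric, assuming sacrebleu is available for the BLEU term and treating COMETKiwi as a caller-supplied callable (loading the wmt23-cometkiwi-da-xl checkpoint is omitted). The function names, the [0, 1] normalization of BLEU, and the callable interface are illustrative assumptions, not the paper's released implementation.

```python
# Hypothetical sketch of S_metric (Reward-Lex / Reward-Sem / Reward-Mix) from Section 3.1.
# Assumes: sacrebleu is installed; `cometkiwi_score(src, trans)` is a caller-supplied function
# returning a COMETKiwi score in [0, 1] (e.g. wrapping Unbabel/wmt23-cometkiwi-da-xl).
from typing import Callable

import sacrebleu


def bleu_normalized(trans: str, ref: str) -> float:
    """Sentence-level BLEU via sacrebleu, rescaled from [0, 100] to [0, 1].

    For Chinese targets the "zh" tokenizer would typically be selected; defaults are kept here.
    """
    return sacrebleu.sentence_bleu(trans, [ref]).score / 100.0


def metric_reward(
    src: str,
    trans: str,
    ref: str,
    cometkiwi_score: Callable[[str, str], float],
    mode: str = "mix",  # "lex", "sem", or "mix"
) -> float:
    """S_metric: B(trans, ref) for Reward-Lex, CK(src, trans) for Reward-Sem, their sum for Reward-Mix."""
    if mode == "lex":
        return bleu_normalized(trans, ref)
    if mode == "sem":
        return cometkiwi_score(src, trans)
    if mode == "mix":
        return bleu_normalized(trans, ref) + cometkiwi_score(src, trans)
    raise ValueError(f"unknown reward mode: {mode!r}")
```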
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": "Rule-Metric Mixed Reward: The final reward " + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": " combines both the format reward (" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "S_{\\text{format}}" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": ") and the metric reward (" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "S_{\\text{metric}}" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": "). Formally, it is calculated using the following rule:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 312, + 143, + 513, + 179 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 143, + 513, + 179 + ], + "spans": [ + { + "bbox": [ + 312, + 143, + 513, + 179 + ], + "type": "interline_equation", + "content": "r = \\left\\{ \\begin{array}{l l} S _ {f o r m a t} - 2, & \\text {i f} S _ {f o r m a t} = - 1 \\\\ S _ {f o r m a t} + S _ {m e t r i c}, & \\text {i f} S _ {f o r m a t} = 1 \\end{array} \\right.", + "image_path": "ce703c30b6d4f759ae322599c25adbf3b556fc22b8114be045e5c526169a0056.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "spans": [ + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "inline_equation", + "content": "S_{\\text{metric}}" + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "text", + "content": " is calculated only if the response format is correct. " + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "inline_equation", + "content": "S_{\\text{format}} = 1" + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "text", + "content": ". If the format is incorrect (" + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "inline_equation", + "content": "S_{\\text{format}} = -1" + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "text", + "content": "), we skip the metric reward evaluation and assign a fixed penalty (e.g., 2) to discourage format violations. This setup encourages the model to first learn the correct output structure. When the format is correct, the final reward becomes " + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "inline_equation", + "content": "r = 1 + S_{\\text{metric}}" + }, + { + "bbox": [ + 302, + 185, + 526, + 402 + ], + "type": "text", + "content": ". Unlike traditional rule-based rewards that give a fixed score for correct outputs, our approach uses a continuous metric score. This means the reward can vary within the [1, 2] or [1, 3] range, depending on translation quality. As a result, the model receives more detailed feedback and can learn to improve even small differences in translation quality across correctly formatted outputs." 
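Reading the format check and the combination rule together, a compact sketch of the full reward r might look as follows. The <think> and <translate> tag names are assumptions inferred from the tag-stripped template above and from R1-Zero-style prompts, and metric_fn stands in for any S_metric implementation such as the one sketched earlier; none of this is quoted from the released code.

```python
# Hypothetical sketch of the rule-metric mixed reward r from Section 3.1.
# Assumes responses of the form "<think> ... </think> <translate> ... </translate>";
# the tag names are inferred, not taken from the paper's repository.
import re
from typing import Callable

RESPONSE_PATTERN = re.compile(
    r"^\s*<think>(?P<think>.+?)</think>\s*<translate>(?P<translation>.+?)</translate>\s*$",
    re.DOTALL,
)


def format_reward(response: str) -> int:
    """S_format: +1 if the response matches the required structure, -1 otherwise."""
    return 1 if RESPONSE_PATTERN.match(response) else -1


def mixed_reward(
    response: str,
    src: str,
    ref: str,
    metric_fn: Callable[[str, str, str], float],  # metric_fn(src, trans, ref) -> S_metric
) -> float:
    """r = S_format - 2 (= -3) on a format violation, else 1 + S_metric."""
    s_format = format_reward(response)
    if s_format == -1:
        return s_format - 2  # fixed penalty; metric evaluation is skipped entirely
    trans = RESPONSE_PATTERN.match(response).group("translation").strip()
    return s_format + metric_fn(src, trans, ref)
```

Under this sketch, a correctly formatted response earns a reward in [1, 2] (Lex or Sem) or [1, 3] (Mix), while any format violation is pinned at -3, matching the ordering described above.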
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 411, + 398, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 411, + 398, + 424 + ], + "spans": [ + { + "bbox": [ + 302, + 411, + 398, + 424 + ], + "type": "text", + "content": "3.2 RL Algorithm" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "spans": [ + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": "We use the Group Relative Policy Optimization (GRPO) algorithm (Shao et al., 2024) to train the translation model with our rule-metric mixed reward. In each training step, for a given translational question " + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": ", we sample a group of candidate outputs " + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "inline_equation", + "content": "\\{o_1, o_2, \\dots, o_G\\}" + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": " from the policy model " + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{old}}" + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "inline_equation", + "content": "A_i = \\frac{r_i - \\mathrm{mean}(\\{r_1, r_2, \\dots, r_G\\})}{\\mathrm{std}(\\{r_1, r_2, \\dots, r_G\\})}" + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": " is the computed advantage using the group rule-metric mixed rewards " + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "inline_equation", + "content": "\\{r_1, r_2, \\dots, r_G\\}" + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": ". GRPO then maximizes the following objective function to optimize " + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "inline_equation", + "content": "\\pi_\\theta" + }, + { + "bbox": [ + 302, + 428, + 525, + 565 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 573, + 525, + 707 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 573, + 525, + 707 + ], + "spans": [ + { + "bbox": [ + 309, + 573, + 525, + 707 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} J _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (O | q)} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} \\mid q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} \\mid q)} A _ {i}, \\right. \\right. \\\\ \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (o _ {i} \\mid q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} \\mid q)}, 1 - \\varepsilon , 1 + \\varepsilon\\right) A _ {i}\\left. \\right) \\\\ \\left. 
- \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right) \\right], \\tag {1} \\\\ \\end{array}", + "image_path": "ccde1d795737b77b035a78d78d9a3d8caaca032767489bbe456ecb086b0bf4cd.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " are hyperparameters controlling the PPO clipping threshold and the weight of the Kullback-Leibler (KL) divergence penalty (Schulman et al., 2017; Shao et al., 2024), respectively. Specifically, " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " determines the permissible range for policy" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 741, + 287, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 741, + 287, + 773 + ], + "spans": [ + { + "bbox": [ + 67, + 741, + 287, + 773 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 67, + 741, + 287, + 773 + ], + "type": "text", + "content": "https://github.com/mjpost/sacrebleu \n" + }, + { + "bbox": [ + 67, + 741, + 287, + 773 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 67, + 741, + 287, + 773 + ], + "type": "text", + "content": "https://huggingface.co/Unbabel/wmt23-cometkiwi-da-xl" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "text", + "content": "updates, while " + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "text", + "content": " regulates the magnitude of the KL penalty during training to prevent excessive policy shifts from the reference policy " + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "inline_equation", + "content": "\\pi_{ref}" + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "text", + "content": " (typically the initialization of " + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "text", + "content": "). 
" + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "inline_equation", + "content": "D_{KL}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}}) = \\frac{\\pi_{\\mathrm{ref}}(o_i|q)}{\\pi_{\\theta}(o_i|q)} - \\log \\left(\\frac{\\pi_{\\mathrm{ref}}(o_i|q)}{\\pi_{\\theta}(o_i|q)}\\right) - 1" + }, + { + "bbox": [ + 67, + 71, + 291, + 158 + ], + "type": "text", + "content": " is the KL divergence approximation term." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 169, + 155, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 169, + 155, + 183 + ], + "spans": [ + { + "bbox": [ + 67, + 169, + 155, + 183 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 192, + 189, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 192, + 189, + 205 + ], + "spans": [ + { + "bbox": [ + 67, + 192, + 189, + 205 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "text", + "content": "Dataset and Benchmarks. Our primary experimental focus is on English (EN) and Chinese (ZH). Following Xu et al. (2023) and Feng et al. (2024a), we collect parallel examples " + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "inline_equation", + "content": "(\\mathrm{EN} \\rightleftharpoons \\mathrm{ZH})" + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "text", + "content": " sourced from WMT 2017 through WMT 2020. We apply a filter to exclude sentences containing fewer than 30 characters, leading to a final training set of 13,130 examples. For evaluation, we assess performance on two in-domain translation tasks using recent WMT benchmarks: EN-ZH (WMT " + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "inline_equation", + "content": "24^{3}" + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "text", + "content": ") and ZHEN (WMT " + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "inline_equation", + "content": "23^{4}" + }, + { + "bbox": [ + 67, + 210, + 291, + 439 + ], + "type": "text", + "content": "). Additionally, we evaluate generalization capabilities on three out-of-distribution (OOD) translation directions: English-Japanese (EN-JA, WMT 2024), German-English (DE-EN, WMT 2023 Document-level), and German-Chinese (DE-ZH, Flores-200 (Costa-jussa et al., 2022)). Detailed statistics are presented in Table 8." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 440, + 291, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 440, + 291, + 576 + ], + "spans": [ + { + "bbox": [ + 67, + 440, + 291, + 576 + ], + "type": "text", + "content": "Baselines. Our primary baselines encompass leading proprietary models, namely Claude-3.5-Sonnet (Anthropic, 2024), GPT-4o (OpenAI, 2023), and Gemini-1.5-Pro (Team et al., 2024), alongside advanced open-source models such as the Qwen2.5 series (Yang et al., 2024), LLaMA-3.1 series (Grattafori et al., 2024), and the translation-specific Tower family (Alves et al., 2024). Proprietary models were accessed via their APIs5. More evaluation details can be found in Appendix A." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 576, + 291, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 576, + 291, + 698 + ], + "spans": [ + { + "bbox": [ + 67, + 576, + 291, + 698 + ], + "type": "text", + "content": "Evaluation Metrics. We assess translation quality using a suite of three complementary metrics: the lexical metric BLEU (Post, 2018), the reference-free learning-based metric COMETKiwi (Rei et al., 2022) (COMETKiwi-23-XL), and the reference-based learning-based metric XCOMET (Guerreiro et al., 2024) (XCOMET-XL). Together, these metrics provide a comprehensive view by evaluating both lexical fidelity and semantic adequacy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 699, + 290, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 699, + 290, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 699, + 290, + 712 + ], + "type": "text", + "content": "Training Details. Our implementation is based on" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "spans": [ + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "text", + "content": "the verl" + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "text", + "content": " framework. We selected the Qwen2.5-base series (3B and 7B parameter variants) as starting models for MT-R1-Zero training. During training, we configure a batch size of 8 and utilize 8 rollouts per prompt within the GRPO algorithm. We employ a constant learning rate of 5e-7 and set the sampling temperature to 1.0. The maximum generation length for responses is capped at 1024 tokens. We set the KL penalty coefficient " + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "text", + "content": " to 0, thereby removing the KL constraint against the reference policy. This decision stems from our empirical observation that the KL penalty tends to restrict the model's exploration of diverse response lengths, which we will discuss further in Section 6.1. The PPO clipping range " + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 302, + 70, + 526, + 301 + ], + "type": "text", + "content": " is set to 0.2. All models are trained for 1 epoch on 4 NVIDIA H800 80G GPUs for about 13 hours." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 313, + 393, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 313, + 393, + 325 + ], + "spans": [ + { + "bbox": [ + 302, + 313, + 393, + 325 + ], + "type": "text", + "content": "4.2 Main Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 332, + 526, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 332, + 526, + 684 + ], + "spans": [ + { + "bbox": [ + 302, + 332, + 526, + 684 + ], + "type": "text", + "content": "In-Domain Performance. Our models show substantial gains over their corresponding base versions, and exhibit competing performance compared to existing SoTA benchmarks (Table 1). On the EN-ZH direction, our MT-R1-Zero-7B-Mix on the average score (62.25) also surpasses GPT-4o (61.86) and Qwen2.5-72B (61.77). 
In addition, the MT-R1-Zero-7B-Sem achieves the best semantic-level performance on EN-ZH, scoring 72.07 on COMETKiwi and 79.37 on XCOMET. This surpasses the strongest proprietary model, Claude3.5-Sonnet, by 1.68 COMETKiwi points and exceeds the best listed open-source model, Qwen2.5-72B, by more than 3 points. On the ZH-EN direction, MT-R1-Zero-7B-Mix is also highly competitive. Our MT-R1-Zero-7B-Sem achieves a COMETKiwi score of 71.66, which is comparable to the top closed models (Claude-3.5-Sonnet 71.69, GPT-4o 71.63) and surpasses strong open-source models such as LLaMA-3.1-70B (70.43) and Qwen2.5-72B (70.95). Furthermore, the MT-R1-Zero-3B-Sem delivers impressive performance for its scale. It scores 69.75 COMETKiwi on EN-ZH, which is approximately 1.7 points higher than the much larger LLaMA-3.1-70B and over 0.7 points above Qwen2.5-72B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 685, + 526, + 752 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 685, + 526, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 685, + 526, + 752 + ], + "type": "text", + "content": "Out-of-Distribution Performance. Table 2 reports the XCOMET of our models on OOD language pairs with a zero-shot setting (models trained only on EN-ZH/ZH-EN). Despite this challenging setup, our models exhibit strong generaliza" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 81, + 720, + 279, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 720, + 279, + 731 + ], + "spans": [ + { + "bbox": [ + 81, + 720, + 279, + 731 + ], + "type": "text", + "content": "3https://www2.statmt.org/wmt24/translation-task.html" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 81, + 732, + 278, + 742 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 732, + 278, + 742 + ], + "spans": [ + { + "bbox": [ + 81, + 732, + 278, + 742 + ], + "type": "text", + "content": "4https://www2.statmt.org/wmt23/translation-task.html" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 743, + 289, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 743, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 743, + 289, + 774 + ], + "type": "text", + "content": "5The specific proprietary models accessed include Anthropic's claude-3-5-sonnet-20241022, OpenAI's gpt-4o-2024-08-06, and Google's gemini-1.5-pro." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 762, + 446, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 762, + 446, + 774 + ], + "spans": [ + { + "bbox": [ + 315, + 762, + 446, + 774 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 315, + 762, + 446, + 774 + ], + "type": "text", + "content": "https://github.com/volcengine/verl" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 802 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 68, + 523, + 336 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, + 523, + 336 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 523, + 336 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 523, + 336 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">MODEL</td><td colspan="4">ZH-EN</td><td colspan="4">EN-ZH</td></tr>
<tr><td>BLEU</td><td>COMETKiwi</td><td>XCOMET</td><td>Avg.</td><td>BLEU</td><td>COMETKiwi</td><td>XCOMET</td><td>Avg.</td></tr>
<tr><td colspan="9">Closed</td></tr>
<tr><td>Claude-3.5-Sonnet (2024/10)</td><td>22.55</td><td>71.69</td><td>87.32</td><td>60.52</td><td>38.63</td><td>70.39</td><td>78.24</td><td>62.42</td></tr>
<tr><td>GPT-4o (2024/08)</td><td>22.57</td><td>71.63</td><td>87.22</td><td>60.47</td><td>41.13</td><td>69.01</td><td>75.43</td><td>61.86</td></tr>
<tr><td>Gemini-1.5-Pro (2025/03)</td><td>18.34</td><td>69.23</td><td>85.55</td><td>57.71</td><td>39.82</td><td>67.47</td><td>76.26</td><td>61.18</td></tr>
<tr><td colspan="9">Open</td></tr>
<tr><td colspan="9">General Purpose LLMs</td></tr>
<tr><td>LLaMA-3.1-70B-Instruct</td><td>25.19</td><td>70.43</td><td>86.21</td><td>60.61</td><td>39.82</td><td>68.05</td><td>75.17</td><td>61.01</td></tr>
<tr><td>Qwen2.5-72B-Instruct</td><td>21.96</td><td>70.95</td><td>87.07</td><td>59.99</td><td>39.29</td><td>69.04</td><td>76.97</td><td>61.77</td></tr>
<tr><td>Qwen2.5-32B-Instruct</td><td>20.54</td><td>69.35</td><td>85.47</td><td>58.45</td><td>36.36</td><td>68.43</td><td>74.90</td><td>59.90</td></tr>
<tr><td colspan="9">Translation-Specific LLMs</td></tr>
<tr><td>TowerInstruct-13B-v0.1</td><td>24.72</td><td>70.17</td><td>85.69</td><td>60.19</td><td>37.06</td><td>66.22</td><td>73.13</td><td>58.80</td></tr>
<tr><td>TowerInstruct-7B-v0.2</td><td>23.32</td><td>69.99</td><td>84.93</td><td>59.41</td><td>34.93</td><td>64.04</td><td>70.67</td><td>56.55</td></tr>
<tr><td colspan="9">Ours</td></tr>
<tr><td>Qwen2.5-3B-Base</td><td>14.26</td><td>64.86</td><td>76.76</td><td>51.96</td><td>15.90</td><td>52.05</td><td>67.13</td><td>45.03</td></tr>
<tr><td>MT-R1-Zero-3B-Lex</td><td>21.53</td><td>66.33</td><td>81.69</td><td>56.52</td><td>33.70</td><td>60.58</td><td>65.67</td><td>53.32</td></tr>
<tr><td>MT-R1-Zero-3B-Sem</td><td>18.41</td><td>70.33</td><td>85.98</td><td>58.24</td><td>24.32</td><td>69.75</td><td>76.92</td><td>57.00</td></tr>
<tr><td>MT-R1-Zero-3B-Mix</td><td>22.54</td><td>68.84</td><td>84.08</td><td>58.49</td><td>36.27</td><td>65.05</td><td>72.10</td><td>57.81</td></tr>
<tr><td>Qwen2.5-7B-Base</td><td>18.23</td><td>68.27</td><td>84.99</td><td>57.16</td><td>31.14</td><td>63.38</td><td>69.83</td><td>54.78</td></tr>
<tr><td>MT-R1-Zero-7B-Lex</td><td>23.56</td><td>65.35</td><td>82.12</td><td>57.01</td><td>40.11</td><td>64.57</td><td>70.21</td><td>58.30</td></tr>
<tr><td>MT-R1-Zero-7B-Sem</td><td>16.62</td><td>71.66</td><td>86.07</td><td>58.12</td><td>23.07</td><td>72.07</td><td>79.37</td><td>58.17</td></tr>
<tr><td>MT-R1-Zero-7B-Mix</td><td>23.98</td><td>70.81</td><td>86.17</td><td>60.32</td><td>40.97</td><td>69.43</td><td>76.36</td><td>62.25</td></tr>
</table>
", + "image_path": "aeb6fe232c20e93a9294e931828401ff289284f573bd6eaeb2a718bc2b332396.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 70, + 410, + 287, + 528 + ], + "blocks": [ + { + "bbox": [ + 67, + 343, + 525, + 391 + ], + "lines": [ + { + "bbox": [ + 67, + 343, + 525, + 391 + ], + "spans": [ + { + "bbox": [ + 67, + 343, + 525, + 391 + ], + "type": "text", + "content": "Table 1: Performance comparison on in-domain translation directions (EN-ZH, ZH-EN) using BLEU, COMETKiwi, and XCOMET metrics, with average metric scores (Avg.). MT-R1-Zero variants (-Lex, -Sem, -Mix) are compared against closed and open baselines, which are further categorized by accessibility and specialization. The -Mix variant often achieves the best balance, while -Sem reaches peak semantic scores." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 410, + 287, + 528 + ], + "lines": [ + { + "bbox": [ + 70, + 410, + 287, + 528 + ], + "spans": [ + { + "bbox": [ + 70, + 410, + 287, + 528 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">MODEL</td><td colspan="4">OUT-OF-DISTRIBUTION</td></tr>
<tr><td>EN-JA</td><td>DE-EN (DOC)</td><td>DE-ZH</td><td>Avg.</td></tr>
<tr><td colspan="5">Strong Baseline</td></tr>
<tr><td>Qwen2.5-72B-Instruct</td><td>76.86</td><td>89.51</td><td>88.42</td><td>84.93</td></tr>
<tr><td>LLaMA3.1-70B-Instruct</td><td>75.67</td><td>88.72</td><td>87.42</td><td>83.94</td></tr>
<tr><td colspan="5">Same-size Baseline</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>63.74</td><td>87.45</td><td>84.43</td><td>78.54</td></tr>
<tr><td>LLaMA-3.1-8B-Instruct</td><td>64.50</td><td>86.84</td><td>82.23</td><td>77.86</td></tr>
<tr><td>TowerInstruct-7B-v0.2</td><td>56.73</td><td>89.47</td><td>84.28</td><td>76.83</td></tr>
<tr><td>MT-R1-Zero-7B-Lex</td><td>60.65</td><td>85.25</td><td>83.86</td><td>76.59</td></tr>
<tr><td>MT-R1-Zero-7B-Sem</td><td>71.95</td><td>87.68</td><td>87.66</td><td>82.43</td></tr>
<tr><td>MT-R1-Zero-7B-Mix</td><td>68.49</td><td>88.69</td><td>88.69</td><td>81.96</td></tr>
</table>
", + "image_path": "c14738ed2a90fea60765a305cf9e025702d36c8cc35a56742176cf90905b8840.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 537, + 290, + 573 + ], + "lines": [ + { + "bbox": [ + 67, + 537, + 290, + 573 + ], + "spans": [ + { + "bbox": [ + 67, + 537, + 290, + 573 + ], + "type": "text", + "content": "Table 2: Out-of-distribution performance comparison using the XCOMET metric on EN-JA, DE-EN (Document-level), and DE-ZH." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 598, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 598, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 66, + 598, + 291, + 775 + ], + "type": "text", + "content": "tion. The MT-R1-Zero-7B-Sem achieves the highest average XCOMET score (82.43) across the OOD tasks, reaching top scores on EN-JA (71.95) and DE-EN (87.68). The MT-R1-Zero-7B-Mix also demonstrates highly competitive generalization with an average score of 81.96, and secures the highest score on DE-ZH (88.69). While these variants do not consistently surpass the much larger strong baselines (Qwen2.5-72B Avg. 84.93, LLaMA3.1-70B Avg. 83.94), they are still highly competitive. Crucially, MT-R1-Zero-7B-Sem and -Mix significantly outperform all same-size baselines (Qwen2.5-7B-Instruct Avg. 78.54, LLaMA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 412, + 526, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 412, + 526, + 507 + ], + "spans": [ + { + "bbox": [ + 302, + 412, + 526, + 507 + ], + "type": "text", + "content": "3.1-8B-Instruct Avg. 77.86, TowerInstruct-7B-v0.2 Avg. 76.83) by a considerable margin (at least 3.4 points). These OOD results suggest that the quality improvements in MT-R1-Zero can effectively transfer to unseen language pairs. Results using COMETKiwi and BLEU are also provided in Appendix Tables 6 and 7, respectively." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 518, + 455, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 518, + 455, + 533 + ], + "spans": [ + { + "bbox": [ + 302, + 518, + 455, + 533 + ], + "type": "text", + "content": "5 Key Findings and Insight" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 542, + 525, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 542, + 525, + 608 + ], + "spans": [ + { + "bbox": [ + 302, + 542, + 525, + 608 + ], + "type": "text", + "content": "Based on our extensive experiments adapting the R1-Zero paradigm to MT, we identify several key findings regarding the underlying mechanisms, design ideas, and emergent behaviors of our MT-R1-Zero framework." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 620, + 495, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 620, + 495, + 634 + ], + "spans": [ + { + "bbox": [ + 302, + 620, + 495, + 634 + ], + "type": "text", + "content": "5.1 Impact of Reward Metric Selection" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 638, + 525, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 638, + 525, + 706 + ], + "spans": [ + { + "bbox": [ + 302, + 638, + 525, + 706 + ], + "type": "text", + "content": "As detailed in Section 3.1, we explore three metric rewards: Reward-Lex, Reward-Sem, and Reward-Mix. 
Our results demonstrate that the choice among these significantly affects the learning target and final model outputs, as stated in Finding 1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 722, + 511, + 762 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 722, + 511, + 762 + ], + "spans": [ + { + "bbox": [ + 317, + 722, + 511, + 762 + ], + "type": "text", + "content": "Finding 1: Reward metric selection critically shapes optimization targets and translation style." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 301, + 803 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 74, + 68, + 227, + 201 + ], + "blocks": [ + { + "bbox": [ + 74, + 68, + 227, + 201 + ], + "lines": [ + { + "bbox": [ + 74, + 68, + 227, + 201 + ], + "spans": [ + { + "bbox": [ + 74, + 68, + 227, + 201 + ], + "type": "image", + "image_path": "23089aaf58a392db50729b3ce583e9bf14970ec78ddace94058d3f25fd77b580.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 211, + 526, + 234 + ], + "lines": [ + { + "bbox": [ + 67, + 211, + 526, + 234 + ], + "spans": [ + { + "bbox": [ + 67, + 211, + 526, + 234 + ], + "type": "text", + "content": "Figure 2: Training dynamics using Reward-Lex, Reward-Sem, and Reward-Mix, evaluated with COMETKiwi, BLEU, and XCOMET." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 226, + 68, + 372, + 201 + ], + "blocks": [ + { + "bbox": [ + 226, + 68, + 372, + 201 + ], + "lines": [ + { + "bbox": [ + 226, + 68, + 372, + 201 + ], + "spans": [ + { + "bbox": [ + 226, + 68, + 372, + 201 + ], + "type": "image", + "image_path": "6c56dc9a984522cc0e741f0de6a2bac59cf804d50995fb0411cd7ea83591766e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 372, + 68, + 518, + 201 + ], + "blocks": [ + { + "bbox": [ + 372, + 68, + 518, + 201 + ], + "lines": [ + { + "bbox": [ + 372, + 68, + 518, + 201 + ], + "spans": [ + { + "bbox": [ + 372, + 68, + 518, + 201 + ], + "type": "image", + "image_path": "b0f8de023d87072cb76332d6598f440181ff0dd4eb685e2fd493c511dac131f7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 74, + 238, + 521, + 506 + ], + "blocks": [ + { + "bbox": [ + 74, + 238, + 521, + 506 + ], + "lines": [ + { + "bbox": [ + 74, + 238, + 521, + 506 + ], + "spans": [ + { + "bbox": [ + 74, + 238, + 521, + 506 + ], + "type": "image", + "image_path": "ba5a6d095eeb40acfd1aa4e6f18c36e661f2b8ecf8e4bc67ac76bd24ea907ff4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 515, + 525, + 541 + ], + "lines": [ + { + "bbox": [ + 67, + 515, + 525, + 541 + ], + "spans": [ + { + "bbox": [ + 67, + 515, + 525, + 541 + ], + "type": "text", + "content": "Figure 3: Qualitative examples illustrates the effect of different reward functions (Reward-Lex, Reward-Sem, Reward-Mix) on EN-ZH translation, where the stylistic differences are driven by reward optimization (Finding 1)." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 553, + 291, + 769 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 553, + 291, + 769 + ], + "spans": [ + { + "bbox": [ + 67, + 553, + 291, + 769 + ], + "type": "text", + "content": "Figure 2 presents the training dynamics with different rewards. Training with Reward-Lex maximizes BLEU scores, often at the expense of semantic scores, while Reward-Sem maximizes COMETKiwi, leading to a decline in BLEU. Training with Reward-Mix improves both metrics, with a trade-off of achieving sub-optimal COMETKiwi compared to Reward-Sem. Independent evaluation with XCOMET further supports this finding, showing consistent improvements for Sem and Mix variants while fluctuating for Lex. This finding aligns with the insight from Chen et al. (2025), suggesting that lexical and semantic assessments are complementary, particularly for reasoning-oriented LLMs, and combining them can offer a more comprehensive evaluation signal." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 553, + 526, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 553, + 526, + 740 + ], + "spans": [ + { + "bbox": [ + 302, + 553, + 526, + 740 + ], + "type": "text", + "content": "Qualitatively (Figure 3), this optimization alignment manifests as distinct translation styles. BLEU optimization encourages literal, n-gram focused translations, potentially sacrificing nuance. COMETKiwi optimization fosters translations that prioritize semantic faithfulness, even if lexically divergent from references. In contrast, the mixed reward yields balanced translations. This demonstrates that the metric reward fundamentally dictates the nature of the translation quality learned (e.g., semantic v.s. lexical). Therefore, careful metric selection and deliberate fusion are essential for tailoring RL-based MT refinement towards specific and desired translations." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 299, + 801 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 299, + 801 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 299, + 801 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 88, + 70, + 295, + 213 + ], + "blocks": [ + { + "bbox": [ + 88, + 70, + 295, + 213 + ], + "lines": [ + { + "bbox": [ + 88, + 70, + 295, + 213 + ], + "spans": [ + { + "bbox": [ + 88, + 70, + 295, + 213 + ], + "type": "image", + "image_path": "d091b63c34fab3acdd1a2db73581403ba0aaffa3a30b26bc7b4f08eb9981e3cc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 223, + 526, + 260 + ], + "lines": [ + { + "bbox": [ + 67, + 223, + 526, + 260 + ], + "spans": [ + { + "bbox": [ + 67, + 223, + 526, + 260 + ], + "type": "text", + "content": "Figure 4: Training dynamics of MT-R1-Zero models (using Reward-Sem). Left: COMETKiwi score progression for 3B and 7B models on EN-ZH and ZH-EN test sets. Right: Average response length changes over training steps, exhibiting the classic decrease-then-increase pattern (Finding 2)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 296, + 69, + 504, + 212 + ], + "blocks": [ + { + "bbox": [ + 296, + 69, + 504, + 212 + ], + "lines": [ + { + "bbox": [ + 296, + 69, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 296, + 69, + 504, + 212 + ], + "type": "image", + "image_path": "1b66e6c2a22b2c8383e6ce327aa237f85185f220afb86525d8f7223f066d8e8c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 525, + 526, + 561 + ], + "lines": [ + { + "bbox": [ + 67, + 525, + 526, + 561 + ], + "spans": [ + { + "bbox": [ + 67, + 525, + 526, + 561 + ], + "type": "text", + "content": "Figure 5: Evolution of an MT-R1-Zero model's reasoning process and translation output for the Chinese source text \"其影响可能类似于2008年的经济危机\" at different training steps (0, 400, 1600), showcasing the shift from decomposition to more semantic analysis (Finding 2)." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 93, + 270, + 255, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 270, + 255, + 280 + ], + "spans": [ + { + "bbox": [ + 93, + 270, + 255, + 280 + ], + "type": "text", + "content": "Source (ZH):至于电量的详细说明,一般人看着心里会发虚。" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 273, + 268, + 475, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 268, + 475, + 284 + ], + "spans": [ + { + "bbox": [ + 273, + 268, + 475, + 284 + ], + "type": "text", + "content": "Reference (EN):The majority of individuals will feel guilty after reading the in-depth description of the power." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 280, + 292, + 311, + 302 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 292, + 311, + 302 + ], + "spans": [ + { + "bbox": [ + 280, + 292, + 311, + 302 + ], + "type": "text", + "content": "Step 0" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 301, + 222, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 301, + 222, + 309 + ], + "spans": [ + { + "bbox": [ + 85, + 301, + 222, + 309 + ], + "type": "text", + "content": "User:至于电量的详细说明,一般人看着心里会发虚。" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 309, + 503, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 309, + 503, + 329 + ], + "spans": [ + { + "bbox": [ + 86, + 309, + 503, + 329 + ], + "type": "text", + "content": "Assistant: To translate the given Chinese sentence into English, I will first break down the sentence into its components and then find the appropriate English equivalents for each component. The sentence is: \"至于电量的详细说明,一般人看着心里会发虚。\"" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 86, + 329, + 362, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 329, + 362, + 338 + ], + "spans": [ + { + "bbox": [ + 86, + 329, + 362, + 338 + ], + "type": "text", + "content": "1. 至于 - This is a preposition that can be translated to \"as for\" or \"regarding.\"" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 86, + 338, + 320, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 338, + 320, + 345 + ], + "spans": [ + { + "bbox": [ + 86, + 338, + 320, + 345 + ], + "type": "text", + "content": "2. 
电量 - This is a compound noun that means \"electricity\" or \"power.\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 86, + 345, + 356, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 345, + 356, + 351 + ], + "spans": [ + { + "bbox": [ + 86, + 345, + 356, + 351 + ], + "type": "text", + "content": "3. 的 - This is a possessive particle that indicates possession or relationship." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 86, + 351, + 418, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 351, + 418, + 358 + ], + "spans": [ + { + "bbox": [ + 86, + 351, + 418, + 358 + ], + "type": "text", + "content": "4. 详细说明 - This is a compound noun that means \"detailed explanation\" or \"explanation in detail.\"" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 86, + 358, + 301, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 358, + 301, + 364 + ], + "spans": [ + { + "bbox": [ + 86, + 358, + 301, + 364 + ], + "type": "text", + "content": "5. 一般 - This is an adverb that means \"generally\" or \"usually.\"" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 86, + 364, + 276, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 364, + 276, + 371 + ], + "spans": [ + { + "bbox": [ + 86, + 364, + 276, + 371 + ], + "type": "text", + "content": "6. 人 - This is a noun that means \"people\" or \"persons.\"" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 86, + 371, + 315, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 371, + 315, + 379 + ], + "spans": [ + { + "bbox": [ + 86, + 371, + 315, + 379 + ], + "type": "text", + "content": "7. 看着 - This is a verb phrase that means \"looking at\" or \"seeing.\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 86, + 379, + 359, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 379, + 359, + 386 + ], + "spans": [ + { + "bbox": [ + 86, + 379, + 359, + 386 + ], + "type": "text", + "content": "8. 心里 - This is a compound noun that means \"in one's heart\" or \"in one's mind.\"" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 86, + 386, + 283, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 386, + 283, + 392 + ], + "spans": [ + { + "bbox": [ + 86, + 386, + 283, + 392 + ], + "type": "text", + "content": "9. 会 - This is a modal verb that means \"will\" or \"would.\"" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 86, + 392, + 352, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 392, + 352, + 400 + ], + "spans": [ + { + "bbox": [ + 86, + 392, + 352, + 400 + ], + "type": "text", + "content": "10. 发虚 - This is a verb phrase that means \"to feel faint\" or \"to feel dizzy.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 86, + 400, + 493, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 400, + 493, + 407 + ], + "spans": [ + { + "bbox": [ + 86, + 400, + 493, + 407 + ], + "type": "text", + "content": "Now, I will combine these components to form the English translation. As for the detailed explanation" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 86, + 407, + 389, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 407, + 389, + 414 + ], + "spans": [ + { + "bbox": [ + 86, + 407, + 389, + 414 + ], + "type": "text", + "content": "of electricity, generally speaking, people will feel faint when they see it. 
" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 86, + 414, + 149, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 414, + 149, + 420 + ], + "spans": [ + { + "bbox": [ + 86, + 414, + 149, + 420 + ], + "type": "text", + "content": "[COMETKiwI:60.11]" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 157, + 428, + 197, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 428, + 197, + 439 + ], + "spans": [ + { + "bbox": [ + 157, + 428, + 197, + 439 + ], + "type": "text", + "content": "Step 400" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 85, + 441, + 221, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 441, + 221, + 449 + ], + "spans": [ + { + "bbox": [ + 85, + 441, + 221, + 449 + ], + "type": "text", + "content": "User:至于电量的详细说明,一般人看着心里会发虚。" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 86, + 449, + 267, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 449, + 267, + 506 + ], + "spans": [ + { + "bbox": [ + 86, + 449, + 267, + 506 + ], + "type": "text", + "content": "Assistant: My task is to accurately translate the Chinese text provided by the user into natural and coherent English while preserving the original meaning, tone, and nuances of the Chinese sentence. As for the detailed explanation of the battery, most people would feel uneasy just by looking at it. [COMETKwi: 68.44]" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 368, + 428, + 412, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 428, + 412, + 439 + ], + "spans": [ + { + "bbox": [ + 368, + 428, + 412, + 439 + ], + "type": "text", + "content": "Step 1600" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 276, + 440, + 412, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 440, + 412, + 448 + ], + "spans": [ + { + "bbox": [ + 276, + 440, + 412, + 448 + ], + "type": "text", + "content": "User:至于电量的详细说明,一般人看着心里会发虚。" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 276, + 448, + 506, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 448, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 276, + 448, + 506, + 511 + ], + "type": "text", + "content": "Assistant: I translated the Chinese text \"User:至于电量的详细说明,一般人看着心里会发虚。\" to English as \"As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it.\" The original text quotes the user saying that when people see the detailed information about the battery level, they feel a bit uncomfortable or uncertain about it.translate> \"As for the detailed explanation of the battery level, most people feel a bit uneasy when they see it.\"
[COMETKiwi: 74.89]" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 67, + 574, + 285, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 574, + 285, + 600 + ], + "spans": [ + { + "bbox": [ + 67, + 574, + 285, + 600 + ], + "type": "text", + "content": "5.2 Emergence and Evolution of Translation Thinking Patterns" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 67, + 607, + 291, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 607, + 291, + 647 + ], + "spans": [ + { + "bbox": [ + 67, + 607, + 291, + 647 + ], + "type": "text", + "content": "By observing the training process, we provide several insights into model adaptation and the emergence of reasoning." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 83, + 663, + 276, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 663, + 276, + 703 + ], + "spans": [ + { + "bbox": [ + 83, + 663, + 276, + 703 + ], + "type": "text", + "content": "Finding 2: Response length initially declines rapidly and then gradually increases as training progresses." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 720, + 290, + 775 + ], + "type": "text", + "content": "Figure 4 (Right) depicts the pattern in Finding 2 alongside consistent COMETKiwi improvements (Left). Qualitative analysis (Figure 5) reveals that this length trajectory reflects evolving reasoning" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 302, + 574, + 526, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 574, + 526, + 668 + ], + "spans": [ + { + "bbox": [ + 302, + 574, + 526, + 668 + ], + "type": "text", + "content": "strategies. The initial decline corresponds to the model mastering the required format while transitioning from naive decomposition (Step 0) to more efficient, direct translations. The subsequent increase aligns with the development of richer semantic analysis and deeper contextual reasoning within the tags (Step 1600)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 318, + 694, + 511, + 762 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 694, + 511, + 762 + ], + "spans": [ + { + "bbox": [ + 318, + 694, + 511, + 762 + ], + "type": "text", + "content": "Finding 3: Diverse reasoning patterns emerge autonomously, varying in style and complexity, and moreover, the internal reasoning language could dynamically transit to target languages even for OOD settings." + } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "spans": [ + { + "bbox": [ + 293, + 793, + 300, + 803 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 84, + 84, + 194, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 84, + 194, + 101 + ], + "spans": [ + { + "bbox": [ + 84, + 84, + 194, + 101 + ], + "type": "text", + "content": "User:A few more sensors in general would be nice I guess." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 106, + 198, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 106, + 198, + 202 + ], + "spans": [ + { + "bbox": [ + 84, + 106, + 198, + 202 + ], + "type": "text", + "content": "Assistant: I will translate the given English sentence into Japanese. The sentence is \"A few more sensors in general would be nice I guess.\" I will break down the sentence into its main components and find the appropriate Japanese words and phrases for each part. \n 一般、少让更多的SENSAがた或良好を意思檔。" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 254, + 73, + 280, + 82 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 73, + 280, + 82 + ], + "spans": [ + { + "bbox": [ + 254, + 73, + 280, + 82 + ], + "type": "text", + "content": "EN-ZH" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 202, + 85, + 301, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 85, + 301, + 108 + ], + "spans": [ + { + "bbox": [ + 202, + 85, + 301, + 108 + ], + "type": "text", + "content": "User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 202, + 114, + 330, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 114, + 330, + 202 + ], + "spans": [ + { + "bbox": [ + 202, + 114, + 330, + 202 + ], + "type": "text", + "content": "Assistant: I will translate the given text from English to Chinese. The text contains hashtags and mentions of a pilot training program, flying, a company called Teagan Air, a specific aircraft model (PA28), and a pilot. I will translate each part of the text accordingly.\n #飞行员培训 #飞行 #泰根航空 #PA28 #飞行员 #飞行学校 " + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 411, + 73, + 436, + 82 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 73, + 436, + 82 + ], + "spans": [ + { + "bbox": [ + 411, + 73, + 436, + 82 + ], + "type": "text", + "content": "DE-ZH" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 333, + 85, + 515, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 85, + 515, + 108 + ], + "spans": [ + { + "bbox": [ + 333, + 85, + 515, + 108 + ], + "type": "text", + "content": "User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Nähe eines Anspitzers kommt zu müssen." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 333, + 114, + 521, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 114, + 521, + 202 + ], + "spans": [ + { + "bbox": [ + 333, + 114, + 521, + 202 + ], + "type": "text", + "content": "Assistant: To translate the given German sentence into Chinese, I will first break down the sentence into its main components and then find the appropriate Chinese equivalents for each component. The main components are: \"Heutzutage\" (today), \"schreiben\" (write), \"Menschen\" (people), \"Nachrichten\" (messages), \"auf Computerbildschirmen\" (on computer screens), \"ohne je\" (without ever), \"in die Näre\" (near), \"eines Anspitzers\" (a pencil), and \"kommen zu müssen\" (to have to come). \n 现在,人们可以在电脑屏幕上写信,而无需接近铅笔。" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 214, + 193, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 214, + 193, + 230 + ], + "spans": [ + { + "bbox": [ + 85, + 214, + 193, + 230 + ], + "type": "text", + "content": "User:A few more sensors in general would be nice I guess." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 85, + 235, + 194, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 235, + 194, + 331 + ], + "spans": [ + { + "bbox": [ + 85, + 235, + 194, + 331 + ], + "type": "text", + "content": "Assistant: 我はルーダーから、 [User: あ、一般にルーダーが減少増えたARDS']」う英語の文を翻訹てほしぃと類まんた。その文は、ルーダーがルーダーの数を増やてほしぃと顧ったとを表語言。私はこの文を本日語に翻訹てまん。「あ、一般にルーダーをむう減少増やてむいかな」」う意味です。 「一般に、ルーダーをむう減少増やてむいかな。」" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 202, + 213, + 340, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 213, + 340, + 228 + ], + "spans": [ + { + "bbox": [ + 202, + 213, + 340, + 228 + ], + "type": "text", + "content": "User:#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 202, + 234, + 340, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 234, + 340, + 300 + ], + "spans": [ + { + "bbox": [ + 202, + 234, + 340, + 300 + ], + "type": "text", + "content": "Assistant: 我在思考如何将这段英文文本“#PilotTraining #Flying #TeaganAir #PA28 #Pilot #FlightSchool\"翻译成中文。文本中包含多个标签,这些标签可能来自社交媒体帖子或文章,用于描述与飞行员培训、飞行、TeaganAir航空公司、PA28飞机型号、飞行员、飞行学校等相关的内容。翻译时,我会将每个标签逐一翻译成中文,保留其原始含义和用途。翻译后的文本内容如下:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 202, + 300, + 340, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 300, + 340, + 330 + ], + "spans": [ + { + "bbox": [ + 202, + 300, + 340, + 330 + ], + "type": "text", + "content": "#飞行员培训 #飞行 #泰根航空 #PA28型飞机 #飞行员 #飞行学校 \n#飞行员培训 #飞行 #泰根航空#PA28型飞机 #飞行员 #飞行学校" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 346, + 213, + 513, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 213, + 513, + 236 + ], + "spans": [ + { + "bbox": [ + 346, + 213, + 513, + 236 + ], + "type": "text", + "content": "User:Heutzutage schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 346, + 242, + 519, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 242, + 519, + 324 + ], + "spans": [ + { + "bbox": [ + 346, + 242, + 519, + 324 + ], + "type": "text", + "content": "Assistant: 我在思考如何将这段德文文本 \"Heutzzutege schreiben Menschen Nachrichten auf Computerbildschirmen, ohne je in die Höhe eines Anspitzers kommt zu müssen.\"翻译成中文。文本中用户引用了一段话,指出如今人们可以在电脑屏幕上撰写消息,而无需接近任何针状物。原文指出,现在人们可以通过电脑屏幕发送消息,而不需要亲自接触到针状物(可能是指针式打字机或其他类似设备)。这段文本可能是在比较现代的数字通信方式与传统的打字或书写方式的差异。\n\"如今,人们可以在电脑屏幕上编写消息,而无需接近任何针状物。" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 349, + 525, + 384 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 349, + 525, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 349, + 525, + 384 + ], + "type": "text", + "content": "Figure 6: Examples illustrating language-of-thought phenomenon, i.e., transition of the internal reasoning language in MT-R1-Zero models. The reasoning language transits from English at Step 0 to target language at Step 1600, indicated by bold text across various OOD test pairs (Finding 3)." 
+ } + ] + } + ], + "index": 16, + "type": "text" + }, + { + "bbox": [ + 67, + 397, + 291, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 397, + 291, + 655 + ], + "spans": [ + { + "bbox": [ + 67, + 397, + 291, + 655 + ], + "type": "text", + "content": "As R1-Zero-like training typically lacks a cold-start (DeepSeek-AI et al., 2025; Huang et al., 2025) phase with predefined reasoning examples, the observed thinking processes should be emergent and shaped by the RL objective. Our framework incentivizes a variety of reasoning styles within the tags (Figure 12). In particular, we observe patterns ranging from structured multi-step decomposition (Types I-III) to more colloquial processing (Types IV-V). While some instances include explicit \"review/refine\" steps, these generally appear as pre-planned components rather than the conversational, iterative self-correction characteristic of the \"Aha moment\" reported in mathematical reasoning tasks (DeepSeek-AI et al., 2025; Xie et al., 2025; Hu et al., 2025). This suggests that while MT-R1-Zero successfully encourages thinking, the complexity and specific nature of emergent reasoning are task-dependent." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": "Furthermore, we observe a striking and interesting \"language-of-thought\" (transition in the language used for internal reasoning) phenomenon during OOD testing (Figure 6). While base models often use English as default thinking language based on template, MT-R1-Zero models progressively transit to utilize the target language of the translation task for their reasoning process within" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "spans": [ + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "text", + "content": "the " + }, + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "text", + "content": " think " + }, + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "inline_equation", + "content": "\\angle" + }, + { + "bbox": [ + 302, + 397, + 525, + 465 + ], + "type": "text", + "content": " /think> block during training (see bold Japanese or Chinese text in step 1600). This dynamic adaptation of the internal \"language of thought\", conditioned on the task, emerges even without direct supervision on reasoning language." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 513, + 508, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 513, + 508, + 527 + ], + "spans": [ + { + "bbox": [ + 302, + 513, + 508, + 527 + ], + "type": "text", + "content": "5.3 Training Dynamics of Different LLMs" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "spans": [ + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "text", + "content": "The effectiveness and training behavior of MT-R1-Zero are significantly influenced by the base LLM architecture and its initial state (pre-trained vs. instruction-tuned). We compare models from three distinct families: general-purpose (Qwen2.5 series" + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "text", + "content": ", LLaMA-3.1 series" + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "text", + "content": ") and translation-specific (Tower family" + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 302, + 553, + 526, + 688 + ], + "type": "text", + "content": "). For each model family, we include both the pre-trained base model and the corresponding instruction-finetuned variant, adapting their chat templates for the Instruct models." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 125, + 80, + 159 + ], + "type": "aside_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 80, + 159 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 80, + 159 + ], + "type": "text", + "content": "eep" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 315, + 740, + 426, + 752 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 740, + 426, + 752 + ], + "spans": [ + { + "bbox": [ + 315, + 740, + 426, + 752 + ], + "type": "text", + "content": "7https://huggingface.co/Qwen" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 752, + 445, + 762 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 752, + 445, + 762 + ], + "spans": [ + { + "bbox": [ + 316, + 752, + 445, + 762 + ], + "type": "text", + "content": "8https://huggingface.co/meta-llama" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 762, + 508, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 762, + 508, + 774 + ], + "spans": [ + { + "bbox": [ + 317, + 762, + 508, + 774 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 317, + 762, + 508, + 774 + ], + "type": "text", + "content": "https://huggingface.co/Unbabel/TowerBase-7B-v0.1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 293, + 792, + 301, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 792, + 301, + 802 + ], + "spans": [ + { + "bbox": [ + 293, + 792, + 301, + 802 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 69, + 220, + 190 + ], + "blocks": [ + { + "bbox": [ + 76, + 69, + 220, + 190 + ], + "lines": [ + { + "bbox": [ + 76, + 
69, + 220, + 190 + ], + "spans": [ + { + "bbox": [ + 76, + 69, + 220, + 190 + ], + "type": "image", + "image_path": "af5d7b3938351799e5ac948de228b3fbb521d21706b10eec2d5ab3231e32afd1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 200, + 524, + 225 + ], + "lines": [ + { + "bbox": [ + 67, + 200, + 524, + 225 + ], + "spans": [ + { + "bbox": [ + 67, + 200, + 524, + 225 + ], + "type": "text", + "content": "Figure 7: Comparison of training dynamics for different model families (Qwen2.5, LLaMA-3.1, Tower) undergoing MT-R1-Zero RL training, highlighting differences in adaptability (Finding 4)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 220, + 69, + 368, + 190 + ], + "blocks": [ + { + "bbox": [ + 220, + 69, + 368, + 190 + ], + "lines": [ + { + "bbox": [ + 220, + 69, + 368, + 190 + ], + "spans": [ + { + "bbox": [ + 220, + 69, + 368, + 190 + ], + "type": "image", + "image_path": "c68080def505d3b55351519d62c72fbac53f65b34aaeb4005232f5ce0d4e46b6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 513, + 526, + 550 + ], + "lines": [ + { + "bbox": [ + 67, + 513, + 526, + 550 + ], + "spans": [ + { + "bbox": [ + 67, + 513, + 526, + 550 + ], + "type": "text", + "content": "Figure 8: Qualitative comparison of final outputs from different starting models trained with MT-R1-Zero for the Chinese input \"好看,但有点点小遗憾\", illustrating varying degrees of format adherence and reasoning generation, including format hacking by some models (Finding 4)." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 374, + 69, + 518, + 190 + ], + "blocks": [ + { + "bbox": [ + 374, + 69, + 518, + 190 + ], + "lines": [ + { + "bbox": [ + 374, + 69, + 518, + 190 + ], + "spans": [ + { + "bbox": [ + 374, + 69, + 518, + 190 + ], + "type": "image", + "image_path": "c383c95349861f941ebf06b185adff0fab2153b9e73c7a737770d34165272b50.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 233, + 273, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 233, + 273, + 251 + ], + "spans": [ + { + "bbox": [ + 76, + 233, + 273, + 251 + ], + "type": "text", + "content": "Reference (EN): It's encouraging to see, although there is some disappointment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 139, + 255, + 213, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 255, + 213, + 264 + ], + "spans": [ + { + "bbox": [ + 139, + 255, + 213, + 264 + ], + "type": "text", + "content": "Qwen2.5-7B-Base" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 268, + 167, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 268, + 167, + 277 + ], + "spans": [ + { + "bbox": [ + 77, + 268, + 167, + 277 + ], + "type": "text", + "content": "User:好看,但有点点小遗憾" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 277, + 272, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 277, + 272, + 333 + ], + "spans": [ + { + "bbox": [ + 77, + 277, + 272, + 333 + ], + "type": "text", + "content": "Assistant: I translated the Chinese text \"User:好看,但有点点小遗憾\" to English as \"It looks good, but there is a tiny bit of disappointment.\" The user is saying that the object or situation being discussed appears nice, 
but there is a small aspect that they are not completely satisfied with." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 333, + 265, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 333, + 265, + 350 + ], + "spans": [ + { + "bbox": [ + 77, + 333, + 265, + 350 + ], + "type": "text", + "content": " \"It looks good, but there is a tiny bit of disappointment." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 133, + 362, + 218, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 362, + 218, + 371 + ], + "spans": [ + { + "bbox": [ + 133, + 362, + 218, + 371 + ], + "type": "text", + "content": "LLaMA-3.1-8B-Base" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 376, + 167, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 376, + 167, + 385 + ], + "spans": [ + { + "bbox": [ + 77, + 376, + 167, + 385 + ], + "type": "text", + "content": "User:好看,但有点点小遗憾" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 386, + 277, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 386, + 277, + 409 + ], + "spans": [ + { + "bbox": [ + 77, + 386, + 277, + 409 + ], + "type": "text", + "content": "Assistant: reasoning to generate translation \"It's beautiful, but there are a few minor drawbacks.\" " + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 350, + 237, + 445, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 237, + 445, + 246 + ], + "spans": [ + { + "bbox": [ + 350, + 237, + 445, + 246 + ], + "type": "text", + "content": "Qwen2.5-7B-Instruct" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 283, + 252, + 487, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 252, + 487, + 285 + ], + "spans": [ + { + "bbox": [ + 283, + 252, + 487, + 285 + ], + "type": "text", + "content": "user 好看,但有点点小遗憾 \nassistant The user is asking to translate the Chinese phrase \"好看,但有点点小遗憾\" to English. Let's break down the phrase:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 283, + 292, + 511, + 325 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 283, + 292, + 492, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 292, + 492, + 309 + ], + "spans": [ + { + "bbox": [ + 283, + 292, + 492, + 309 + ], + "type": "text", + "content": "- \"好看\" means \"it looks good\" or \"it is attractive.\"\n- \"但\" is a conjunction that means \"but.\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 283, + 309, + 511, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 309, + 511, + 325 + ], + "spans": [ + { + "bbox": [ + 283, + 309, + 511, + 325 + ], + "type": "text", + "content": "- \"有点点小遗憾\" means \"there is a slight bit of regret\" or \"there is a small sense of disappointment.\"" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 282, + 333, + 514, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 333, + 514, + 366 + ], + "spans": [ + { + "bbox": [ + 282, + 333, + 514, + 366 + ], + "type": "text", + "content": "Putting it all together, the phrase suggests that something looks good, but there is a small element of disappointment or regret. 
A possible translation could be: \"It looks good, but there's a tiny bit of disappointment.\"" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 282, + 373, + 506, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 373, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 282, + 373, + 506, + 407 + ], + "type": "text", + "content": "To ensure the translation captures the nuance, we can refine it to: \"It looks good, but there's a slight sense of regret.\" \"It looks good, but there's a slight sense of regret.\" " + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 123, + 418, + 228, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 418, + 228, + 428 + ], + "spans": [ + { + "bbox": [ + 123, + 418, + 228, + 428 + ], + "type": "text", + "content": "LLaMA-3.1-8B-Instruct" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 80, + 433, + 239, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 433, + 239, + 449 + ], + "spans": [ + { + "bbox": [ + 80, + 433, + 239, + 449 + ], + "type": "text", + "content": "user
\n好看,但有点点小遗憾" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 80, + 449, + 271, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 449, + 271, + 465 + ], + "spans": [ + { + "bbox": [ + 80, + 449, + 271, + 465 + ], + "type": "text", + "content": " assistant " + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 80, + 465, + 267, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 465, + 267, + 498 + ], + "spans": [ + { + "bbox": [ + 80, + 465, + 267, + 498 + ], + "type": "text", + "content": " \"It's nice, but there's a little bit of a disappointment.\" \"It's nice, but there's a little bit of a disappointment.\" " + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 295, + 418, + 355, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 418, + 355, + 428 + ], + "spans": [ + { + "bbox": [ + 295, + 418, + 355, + 428 + ], + "type": "text", + "content": "TowerBase-7B" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 285, + 434, + 361, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 434, + 361, + 451 + ], + "spans": [ + { + "bbox": [ + 285, + 434, + 361, + 451 + ], + "type": "text", + "content": "User:好看,但有点点小遗憾" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 285, + 451, + 360, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 451, + 360, + 467 + ], + "spans": [ + { + "bbox": [ + 285, + 451, + 360, + 467 + ], + "type": "text", + "content": "Assistant: Reasoning" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 285, + 467, + 362, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 467, + 362, + 483 + ], + "spans": [ + { + "bbox": [ + 285, + 467, + 362, + 483 + ], + "type": "text", + "content": " \nTranslation" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 285, + 483, + 334, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 483, + 334, + 491 + ], + "spans": [ + { + "bbox": [ + 285, + 483, + 334, + 491 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 404, + 418, + 483, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 404, + 418, + 483, + 428 + ], + "spans": [ + { + "bbox": [ + 404, + 418, + 483, + 428 + ], + "type": "text", + "content": "TowerInstruct-7B" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 376, + 434, + 512, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 434, + 512, + 451 + ], + "spans": [ + { + "bbox": [ + 376, + 434, + 512, + 451 + ], + "type": "text", + "content": "user好看,但有点点小遗憾" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 376, + 451, + 503, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 451, + 503, + 467 + ], + "spans": [ + { + "bbox": [ + 376, + 451, + 503, + 467 + ], + "type": "text", + "content": " assistant 好看,但有点点小遗憾" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 376, + 467, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 467, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 376, + 467, + 504, + 491 + ], + "type": "text", + "content": " It was beautiful, but there was a small disappointment:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 83, + 569, + 276, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 569, + 276, + 664 + ], + "spans": [ + { + "bbox": [ + 83, + 569, + 276, + 664 + ], + "type": 
"text", + "content": "Finding 4: LLM architectures exhibit distinct adaptability and effectiveness under MT-R1-Zero, with Qwen showing the highest compatibility in format learning and reasoning generation, while LLaMA and Tower face more challenges and tend towards \"format hacking\"." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "content": "As shown in Figure 7, both the translation-specific (Tower) and LLaMA-3.1 models exhibit significantly slower adaptation to the required / format compared to Qwen models, as evidenced by their delayed format error reduction. Furthermore, qualitative analysis (Figure 8) reveals that these models often" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 302, + 563, + 526, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 563, + 526, + 766 + ], + "spans": [ + { + "bbox": [ + 302, + 563, + 526, + 766 + ], + "type": "text", + "content": "circumvent meaningful reasoning by generating minimal or templated placeholder content in the tags, potentially \"hacking\" the format reward. In contrast, Qwen2.5 models demonstrate stronger adaptability, consistently producing coherent reasoning text within the structured framework. This suggests that architectures like Qwen may possess inherent advantages for integrating structured reasoning via RL, a finding that aligns with prior work on cognitive behaviors in related domains (Gandhi et al., 2025). However, even Qwen2.5 models occasionally regress to simplistic one-sentence outputs during reasoning tasks, underscoring the instability of exploration in R1-Zero-like training paradigms." + } + ] + } + ], + "index": 35 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 68, + 524, + 136 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, + 524, + 136 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 524, + 136 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 524, + 136 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"3\">Model</td><td colspan=\"4\">In-domain</td><td colspan=\"6\">Out-of-distribution</td></tr>
<tr><td colspan=\"2\">ZH-EN</td><td colspan=\"2\">EN-ZH</td><td colspan=\"2\">EN-JA</td><td colspan=\"2\">DE-ZH</td><td colspan=\"2\">DE-EN (Doc)</td></tr>
<tr><td>COMETKiwi</td><td>XCOMET</td><td>COMETKiwi</td><td>XCOMET</td><td>COMETKiwi</td><td>XCOMET</td><td>COMETKiwi</td><td>XCOMET</td><td>COMETKiwi</td><td>XCOMET</td></tr>
<tr><td>Qwen2.5-7B (SFT)</td><td>69.29</td><td>84.80</td><td>67.25</td><td>74.29</td><td>67.77</td><td>65.39</td><td>67.01</td><td>86.17</td><td>67.44</td><td>86.74</td></tr>
<tr><td>Qwen2.5-7B (RL w/o thinking)</td><td>70.78</td><td>86.26</td><td>69.62</td><td>76.03</td><td>68.68</td><td>68.77</td><td>67.84</td><td>86.67</td><td>68.31</td><td>88.30</td></tr>
<tr><td>Qwen2.5-7B (RL w/ thinking)</td><td>70.81</td><td>86.17</td><td>69.43</td><td>76.36</td><td>69.27</td><td>68.49</td><td>68.74</td><td>88.69</td><td>68.74</td><td>88.69</td></tr></table>
", + "image_path": "22f396b2e96f47bf65e8777892aa22c60a338ae018f764bcd309f20b1afe93a1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 70, + 199, + 289, + 280 + ], + "blocks": [ + { + "bbox": [ + 67, + 144, + 526, + 184 + ], + "lines": [ + { + "bbox": [ + 67, + 144, + 526, + 184 + ], + "spans": [ + { + "bbox": [ + 67, + 144, + 526, + 184 + ], + "type": "text", + "content": "Table 3: Performance comparison of different training paradigms: Supervised Fine-Tuning (SFT) vs. RL with explicit thinking (RL w/ thinking) vs. RL without explicit thinking (RL w/o thinking). Results shown for in-domain and out-of-distribution tasks support the finding that the RL process itself is the primary driver of gains (Section 6)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 199, + 289, + 280 + ], + "lines": [ + { + "bbox": [ + 70, + 199, + 289, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 199, + 289, + 280 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"2\">MODEL</td><td colspan=\"4\">DRT TEST SET</td></tr>
<tr><td>BLEU</td><td>COMETKIWI-22</td><td>XCOMET</td><td>Avg.</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>24.17</td><td>69.66</td><td>61.84</td><td>51.89</td></tr>
<tr><td>TowerInstruct-13B</td><td>22.71</td><td>70.55</td><td>62.77</td><td>52.01</td></tr>
<tr><td>DRT-7B</td><td>35.51</td><td>71.77</td><td>68.40</td><td>58.56</td></tr>
<tr><td>DRT-14B</td><td>36.37</td><td>72.15</td><td>69.64</td><td>59.39</td></tr>
<tr><td>Qwen2.5-7B (SFT)</td><td>21.61</td><td>69.91</td><td>63.20</td><td>51.57</td></tr>
<tr><td>Qwen2.5-7B (RL w/o thinking)</td><td>28.44</td><td>72.92</td><td>66.17</td><td>55.84</td></tr>
<tr><td>Qwen2.5-7B (RL w/ thinking)</td><td>28.42</td><td>73.20</td><td>66.64</td><td>56.09</td></tr></table>
", + "image_path": "3fd6b1496e8692eacee3bd2a1d2b0277004e03a36cfe84b1be7952860d106bbd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 288, + 291, + 326 + ], + "lines": [ + { + "bbox": [ + 67, + 288, + 291, + 326 + ], + "spans": [ + { + "bbox": [ + 67, + 288, + 291, + 326 + ], + "type": "text", + "content": "Table 4: Performance comparison on the DRT literature translation dataset (Wang et al., 2024a) using BLEU, COMETKiwi-22, and XCOMET metrics." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 345, + 202, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 345, + 202, + 359 + ], + "spans": [ + { + "bbox": [ + 67, + 345, + 202, + 359 + ], + "type": "text", + "content": "6 Analysis and Ablation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 366, + 286, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 366, + 286, + 393 + ], + "spans": [ + { + "bbox": [ + 67, + 366, + 286, + 393 + ], + "type": "text", + "content": "6.1 KL Penalty Constrains Response Length but Not Quality Gains" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 396, + 291, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 291, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 291, + 682 + ], + "type": "text", + "content": "We investigate the effectiveness of the KL term in the GRPO objective (Equation 1) on response length and translation quality, as it would regularize the policy by discouraging large deviations from the initial reference model. We conducted experiments without the KL penalty (setting " + }, + { + "bbox": [ + 67, + 396, + 291, + 682 + ], + "type": "inline_equation", + "content": "\\beta = 0" + }, + { + "bbox": [ + 67, + 396, + 291, + 682 + ], + "type": "text", + "content": ", Figure 9), and found that the average response length, after an initial drop, began to fluctuate and trend upward during training. This pattern is consistent with R1-Zero-like results in mathematical tasks (Yu et al., 2025; Yeo et al., 2025). Additional ablation of the KL penalty with COMETKiwi reveals that the improvement of translation quality appears to be largely independent of the thinking vocabulary. Significant quality gains were achieved in early-stage training (e.g., before Steps 400) before a substantial increase in response length, even in experiments conducted without the KL penalty. This suggests that performance improvements in the MT-R1-Zero setup could not be attributed solely or primarily to increasing reasoning vocabulary." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 690, + 285, + 704 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 690, + 285, + 704 + ], + "spans": [ + { + "bbox": [ + 67, + 690, + 285, + 704 + ], + "type": "text", + "content": "6.2 Disentangling RL and Explicit Thinking" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 708, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 292, + 775 + ], + "type": "text", + "content": "To determine whether performance gains stem primarily from the explicit step or the underlying RL optimization, we conducted an ablation study comparing three training paradigms using the similar setup from Section 4.1: 1) Supervised Fine" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 200, + 518, + 320 + ], + "blocks": [ + { + "bbox": [ + 309, + 200, + 518, + 320 + ], + "lines": [ + { + "bbox": [ + 309, + 200, + 518, + 320 + ], + "spans": [ + { + "bbox": [ + 309, + 200, + 518, + 320 + ], + "type": "image", + "image_path": "0a16468e5c467f1eb3ed748065d44da44f367778442a612e837a87eaa1935712.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 329, + 515, + 456 + ], + "blocks": [ + { + "bbox": [ + 309, + 329, + 515, + 456 + ], + "lines": [ + { + "bbox": [ + 309, + 329, + 515, + 456 + ], + "spans": [ + { + "bbox": [ + 309, + 329, + 515, + 456 + ], + "type": "image", + "image_path": "e55e14eebe1ca2bdc590ae8036f91c559c49fa5d52dfe0db21c613c1ce9d1660.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "lines": [ + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "spans": [ + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "type": "text", + "content": "Figure 9: Effect of the KL divergence penalty on EN-ZH COMETKiwi score and response length progression for models trained with (w/ KL, " + }, + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "type": "inline_equation", + "content": "\\beta = 0.01" + }, + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "type": "text", + "content": ") and without (w/o KL, " + }, + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "type": "inline_equation", + "content": "\\beta = 0" + }, + { + "bbox": [ + 302, + 466, + 527, + 526 + ], + "type": "text", + "content": ") the penalty. Experiments are conducted three times with MT-R1-Zero-7B-Sem." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 543, + 526, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 543, + 526, + 704 + ], + "spans": [ + { + "bbox": [ + 302, + 543, + 526, + 704 + ], + "type": "text", + "content": "Tuning (SFT): The same base model is fine-tuned on the parallel data using LLaMA-Factory (Zheng et al., 2024), establishing a non-RL baseline. 2) RL w/ thinking (MT-R1-Zero-Sem): The model is trained with the rule-metric mixed reward (Format Reward and Reward-Sem) while enforcing explicit / structure generation. 3) RL w/o thinking: The model is trained with RL-zero optimization (Reward-Sem) solely to the final output, with no constraints on explicit step generation. See Appendix B for more details." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "type": "text", + "content": "The results are presented in Table 3. It reveals that the \"RL w/o thinking\" variant achieves performance comparable to MT-R1-Zero (\"RL w/ thinking\") across both in-domain and OOD tasks, while both RL configurations substantially outperform" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 74, + 69, + 185, + 179 + ], + "blocks": [ + { + "bbox": [ + 74, + 69, + 185, + 179 + ], + "lines": [ + { + "bbox": [ + 74, + 69, + 185, + 179 + ], + "spans": [ + { + "bbox": [ + 74, + 69, + 185, + 179 + ], + "type": "image", + "image_path": "02ab532ad654b385575826e4ea322358bc806133d469827fce79440c3868a45c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 187, + 69, + 296, + 179 + ], + "blocks": [ + { + "bbox": [ + 187, + 69, + 296, + 179 + ], + "lines": [ + { + "bbox": [ + 187, + 69, + 296, + 179 + ], + "spans": [ + { + "bbox": [ + 187, + 69, + 296, + 179 + ], + "type": "image", + "image_path": "cc80f22abd648b0263c9273ccf57a87fad8cc6b763268813c8dafc1c7cc14e83.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 296, + 69, + 406, + 179 + ], + "blocks": [ + { + "bbox": [ + 296, + 69, + 406, + 179 + ], + "lines": [ + { + "bbox": [ + 296, + 69, + 406, + 179 + ], + "spans": [ + { + "bbox": [ + 296, + 69, + 406, + 179 + ], + "type": "image", + "image_path": "71511929065aefb7be9ca6b4f5a1077fa0d62bedbe3d5c23ded80cd558ab11c9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 407, + 69, + 518, + 179 + ], + "blocks": [ + { + "bbox": [ + 407, + 69, + 518, + 179 + ], + "lines": [ + { + "bbox": [ + 407, + 69, + 518, + 179 + ], + "spans": [ + { + "bbox": [ + 407, + 69, + 518, + 179 + ], + "type": "image", + "image_path": "0757a8c88b15053894a803861ac938b15aece754b128ca6c399bcfc30873b808.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 73, + 180, + 185, + 290 + ], + "blocks": [ + { + "bbox": [ + 73, + 180, + 185, + 290 + ], + "lines": [ + { + "bbox": [ + 73, + 180, + 185, + 290 + ], + "spans": [ + { + "bbox": [ + 73, + 180, + 185, + 290 + ], + "type": "image", + "image_path": "d80cadc9e8ea52aa5536f11d8ae0db66b4eea2ed0b6e2458c56434863a1b4d6f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 300, + 524, + 324 + ], + "lines": [ + { + "bbox": [ + 67, + 300, + 524, + 324 + ], + "spans": [ + { + "bbox": [ + 67, + 300, + 524, + 324 + ], + "type": "text", + "content": "Figure 10: Training progression (COMET-22) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets, demonstrating applicability in multilingual 
settings (Section 6.3)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 187, + 180, + 296, + 290 + ], + "blocks": [ + { + "bbox": [ + 187, + 180, + 296, + 290 + ], + "lines": [ + { + "bbox": [ + 187, + 180, + 296, + 290 + ], + "spans": [ + { + "bbox": [ + 187, + 180, + 296, + 290 + ], + "type": "image", + "image_path": "7ac057682468991acec0a5725954084977bdbadd88258f7bb7ab1f266d8066bf.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 297, + 180, + 406, + 290 + ], + "blocks": [ + { + "bbox": [ + 297, + 180, + 406, + 290 + ], + "lines": [ + { + "bbox": [ + 297, + 180, + 406, + 290 + ], + "spans": [ + { + "bbox": [ + 297, + 180, + 406, + 290 + ], + "type": "image", + "image_path": "e93f56af0eca6a1c191d82bec561e582d8affcd116cfbe1b06aeac5792b12def.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 408, + 180, + 518, + 290 + ], + "blocks": [ + { + "bbox": [ + 408, + 180, + 518, + 290 + ], + "lines": [ + { + "bbox": [ + 408, + 180, + 518, + 290 + ], + "spans": [ + { + "bbox": [ + 408, + 180, + 518, + 290 + ], + "type": "image", + "image_path": "2b806a900a9478d666e9030e40c0b37feece8287689e4230effea1f0e3383eb4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 337, + 291, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 337, + 291, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 337, + 291, + 539 + ], + "type": "text", + "content": "the SFT baseline – particularly in OOD settings. This pattern is further corroborated by evaluations on the DRT test set (Table 4), a literature translation benchmark (Wang et al., 2024a), where we again observe marginal differences between RL variants but significant gains over SFT. These findings demonstrate that while the tag could facilitate emergent reasoning patterns, the major performance improvements in MT-R1-Zero are primarily from the RL framework itself. This aligns with the intuition that online RL methods, iteratively sampling and evaluating self-generated outputs against quality metrics, principally learn \"how to translate\" that surpass SFT's behavior cloning limitations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 549, + 285, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 549, + 285, + 562 + ], + "spans": [ + { + "bbox": [ + 67, + 549, + 285, + 562 + ], + "type": "text", + "content": "6.3 Multilingual and Low-Resource Support" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 566, + 291, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 566, + 291, + 756 + ], + "spans": [ + { + "bbox": [ + 67, + 566, + 291, + 756 + ], + "type": "text", + "content": "To evaluate the broader applicability of our framework, we examine its effectiveness in multilingual training scenarios and its potential benefits for low-resource languages. We train multilingual MT-R1-Zero models using the Germanic language data split in the X-ALMA (Xu et al., 2024), augmented with Chinese (see Table 9 for detailed data statistics). 
We set the batch size to 16 and used COMET" + }, + { + "bbox": [ + 67, + 566, + 291, + 756 + ], + "type": "inline_equation", + "content": "22^{10}" + }, + { + "bbox": [ + 67, + 566, + 291, + 756 + ], + "type": "text", + "content": " as the metric reward (Reward-Sem), consistent with the evaluation protocols in X-ALMA. All models are trained for 1 epoch on 16 NVIDIA H800 80G GPUs for about 12 hours. All other hyperparameters follow the configuration described in Section 4.1. The training progress, measured by" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 337, + 526, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 337, + 526, + 377 + ], + "spans": [ + { + "bbox": [ + 302, + 337, + 526, + 377 + ], + "type": "text", + "content": "COMET-22 for English-to-target directions, is depicted in Figure 10. We also report the XCOMET progression in Figure 11." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 378, + 526, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 378, + 526, + 486 + ], + "spans": [ + { + "bbox": [ + 302, + 378, + 526, + 486 + ], + "type": "text", + "content": "The learning curves demonstrate consistent improvement in translation quality across languages spanning diverse resource levels, including those typically considered low-resource (e.g., Icelandic (IS) and Norwegian (NO)). The steady performance improvement observed throughout training confirms that the MT-R1-Zero framework remains effective when applied in multilingual settings." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 496, + 381, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 496, + 381, + 509 + ], + "spans": [ + { + "bbox": [ + 302, + 496, + 381, + 509 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 301, + 518, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 518, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 301, + 518, + 526, + 775 + ], + "type": "text", + "content": "In this work, we introduced MT-R1-Zero, the first successful adaptation of the R1-Zero RL framework to MT using a novel rule-metric mixed reward mechanism that combines format enforcement with quality metrics. Our MT-R1-Zero significantly improves translation quality, achieving leading results on multiple benchmarks, i.e., our 3B models compete with much larger open-source models, while our 7B models are on par with advanced proprietary models. The MT-R1-Zero also demonstrates strong OOD generalization and multilingual applicability. Through extensive experiments and analysis, we highlight the significant impact of reward metric choice for optimization, showcase distinct adaptability across different LLMs, and reveal that performance gains are principally from the RL process itself rather than reasoning steps or verbosity, establishing R1-Zero as a viable and potent paradigm for advancing MT. 
More broadly, our work high-" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 761, + 264, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 761, + 264, + 774 + ], + "spans": [ + { + "bbox": [ + 78, + 761, + 264, + 774 + ], + "type": "text", + "content": "10https://huggingface.co/Unbabel/wmt22-comet-da" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 290, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 290, + 98 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 290, + 98 + ], + "type": "text", + "content": "lights the great potential of RL for diverse language processing tasks beyond translation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 108, + 131, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 108, + 131, + 120 + ], + "spans": [ + { + "bbox": [ + 68, + 108, + 131, + 120 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 129, + 292, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 129, + 292, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 129, + 292, + 413 + ], + "type": "text", + "content": "While MT-R1-Zero represents a significant advance, certain limitations remain. The emergent reasoning observed, though diverse, did not achieve the sophisticated iterative self-correction capabilities demonstrated in mathematical reasoning tasks using similar RL or R1-like methods. This discrepancy may reflect fundamental differences in task structure or indicate the need for specialized design in translation tasks. One promising direction would be developing task-specific cold-start datasets for SFT before RL optimization, though this would deviate from the pure RL paradigm we investigated here. Future work could focus on inducing deeper reasoning structures specifically beneficial for the MT task, investigating architectural adaptability across a broader range of LLMs, and developing more appropriate reward mechanisms. Exploring applications to specialized domains (e.g., law and healthcare) and general language processing tasks presents promising opportunities to extend this work." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 436, + 127, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 436, + 127, + 448 + ], + "spans": [ + { + "bbox": [ + 68, + 436, + 127, + 448 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 454, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 69, + 454, + 291, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 454, + 291, + 521 + ], + "spans": [ + { + "bbox": [ + 69, + 454, + 291, + 521 + ], + "type": "text", + "content": "Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. 2024. Tower: An open multilingual large language model for translation-related tasks. 
arXiv preprint arXiv:2402.17733." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 529, + 216, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 529, + 216, + 541 + ], + "spans": [ + { + "bbox": [ + 69, + 529, + 216, + 541 + ], + "type": "text", + "content": "Anthropic. 2024. Claude 3.5 sonnet." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 550, + 291, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 550, + 291, + 605 + ], + "spans": [ + { + "bbox": [ + 69, + 550, + 291, + 605 + ], + "type": "text", + "content": "Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. 2025. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 613, + 291, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 613, + 291, + 679 + ], + "spans": [ + { + "bbox": [ + 69, + 613, + 291, + 679 + ], + "type": "text", + "content": "Marta R Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, et al. 2022. No language left behind: Scaling human-centered machine translation. arXiv preprint arXiv:2207.04672." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 687, + 291, + 742 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 687, + 291, + 742 + ], + "spans": [ + { + "bbox": [ + 69, + 687, + 291, + 742 + ], + "type": "text", + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. 2025. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 751, + 291, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 291, + 774 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu," + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 303, + 72, + 527, + 774 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 313, + 72, + 527, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 527, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 527, + 632 + ], + "type": "text", + "content": "Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. 
Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang W. L. Xiao Wei An Xiaodong Liu Xiaohan Wang Xiaokang Chen Xiaotao Nie, Xin Cheng Xien Liu Xie Xingchao Liu Xinyu Yang Xinyuan Li Xuecheng Su Xuheng Lin X.Q.Li Xiangyue Jin Xiaojin Shen Xiaosha Chen Xiaowen Sun Xiaoxiang Wang Xinnan Song Xinyi Zhou Xianzu Wang Xinxia Shan Y.K. Li Y.Q.WangY.X.Wei Yang Zhang Yanhong Xu Yao Li Yao Zhao Yaofeng Sun Yaohui Wang Yi Yu Yichao Zhang Yifan Shi Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunfan Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y.X.Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan Zhang Zhewen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint arXiv:2501.12948." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 641, + 526, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 526, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 526, + 665 + ], + "type": "text", + "content": "Hugging Face. 2025. Open r1: A fully open reproduction of deepseek-r1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 674, + 526, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 674, + 526, + 729 + ], + "spans": [ + { + "bbox": [ + 304, + 674, + 526, + 729 + ], + "type": "text", + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 740, + 526, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 740, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 303, + 740, + 526, + 774 + ], + "type": "text", + "content": "Zhaopeng Feng, Ruizhe Chen, Yan Zhang, Zijie Meng, and Zuozhu Liu. 2024a. 
Ladder: A model-agnostic framework boosting LLM-based machine translation" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 80, + 72, + 291, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 291, + 117 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 291, + 117 + ], + "type": "text", + "content": "to the next level. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15377-15393, Miami, Florida, USA. Association for Computational Linguistics." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 126, + 290, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 126, + 290, + 183 + ], + "spans": [ + { + "bbox": [ + 69, + 126, + 290, + 183 + ], + "type": "text", + "content": "Zhaopeng Feng, Jiahan Ren, Jiayuan Su, Jiamei Zheng, Zhihang Tang, Hongwei Wang, and Zuozhu Liu. 2025. Mt-rewardtree: A comprehensive framework for advancing llm-based machine translation via reward modeling. arXiv preprint arXiv:2503.12123." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 192, + 290, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 192, + 290, + 248 + ], + "spans": [ + { + "bbox": [ + 69, + 192, + 290, + 248 + ], + "type": "text", + "content": "Zhaopeng Feng, Yan Zhang, Hao Li, Wenqiang Liu, Jun Lang, Yang Feng, Jian Wu, and Zuozhu Liu. 2024b. Improving llm-based machine translation with systematic self-correction. arXiv preprint arXiv:2402.16379." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 258, + 290, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 258, + 290, + 324 + ], + "spans": [ + { + "bbox": [ + 69, + 258, + 290, + 324 + ], + "type": "text", + "content": "Markus Freitag, George Foster, David Grangier, Viresh Ratnakar, Qijun Tan, and Wolfgang Macherey. 2021. Experts, errors, and context: A large-scale study of human evaluation for machine translation. Transactions of the Association for Computational Linguistics, 9:1460-1474." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 335, + 290, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 290, + 434 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 290, + 434 + ], + "type": "text", + "content": "Markus Freitag, Nitika Mathur, Chi-kiu Lo, Eleftherios Avramidis, Ricardo Rei, Brian Thompson, Tom Kocmi, Frederic Blain, Daniel Deutsch, Craig Stewart, Chrysoula Zerva, Sheila Castilho, Alon Lavie, and George Foster. 2023. Results of WMT23 metrics shared task: Metrics might be guilty but references are not innocent. In Proceedings of the Eighth Conference on Machine Translation, pages 578-628, Singapore. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 444, + 290, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 444, + 290, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 444, + 290, + 544 + ], + "type": "text", + "content": "Markus Freitag, Ricardo Rei, Nitika Mathur, Chi-kiu Lo, Craig Stewart, Eleftherios Avramidis, Tom Kocmi, George Foster, Alon Lavie, and André F. T. Martins. 2022. Results of WMT22 metrics shared task: Stop using BLEU – neural metrics are better and more robust. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 46–68, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 554, + 290, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 554, + 290, + 608 + ], + "spans": [ + { + "bbox": [ + 69, + 554, + 290, + 608 + ], + "type": "text", + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. 2025. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 619, + 290, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 619, + 290, + 676 + ], + "spans": [ + { + "bbox": [ + 69, + 619, + 290, + 676 + ], + "type": "text", + "content": "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 686, + 290, + 740 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 686, + 290, + 740 + ], + "spans": [ + { + "bbox": [ + 69, + 686, + 290, + 740 + ], + "type": "text", + "content": "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 751, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 290, + 773 + ], + "type": "text", + "content": "Nuno M Guerreiro, Ricardo Rei, Daan van Stigt, Luisa Coheur, Pierre Colombo, and Andre FT Martins." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 773 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 315, + 72, + 525, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 525, + 116 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 525, + 116 + ], + "type": "text", + "content": "2024. xcomet: Transparent machine translation evaluation through fine-grained error detection. Transactions of the Association for Computational Linguistics, 12:979-995." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 123, + 525, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 123, + 525, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 123, + 525, + 190 + ], + "type": "text", + "content": "Minggui He, Yilun Liu, Shimin Tao, Yuanchang Luo, Hongyong Zeng, Chang Su, Li Zhang, Hongxia Ma, Daimeng Wei, Weibin Meng, et al. 2025. 
R1-t1: Fully incentivizing translation capability in llms via reasoning learning. arXiv preprint arXiv:2502.19735." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 197, + 525, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 197, + 525, + 264 + ], + "spans": [ + { + "bbox": [ + 304, + 197, + 525, + 264 + ], + "type": "text", + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. 2025. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 271, + 525, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 271, + 525, + 326 + ], + "spans": [ + { + "bbox": [ + 304, + 271, + 525, + 326 + ], + "type": "text", + "content": "Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. 2025. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 334, + 525, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 334, + 525, + 400 + ], + "spans": [ + { + "bbox": [ + 304, + 334, + 525, + 400 + ], + "type": "text", + "content": "Tom Kocmi, Eleftherios Avramidis, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Markus Freitag, Thamme Gowda, Roman Grundkiewicz, et al. 2024. Preliminary wmt24 ranking of general mt systems and llms. arXiv preprint arXiv:2407.19884." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 407, + 525, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 525, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 525, + 464 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2024. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 470, + 525, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 470, + 525, + 515 + ], + "spans": [ + { + "bbox": [ + 304, + 470, + 525, + 515 + ], + "type": "text", + "content": "Sinuo Liu, Chenyang Lyu, Minghao Wu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2025. New trends for modern machine translation with large reasoning models. arXiv preprint arXiv:2503.10351." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 522, + 525, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 525, + 577 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 525, + 577 + ], + "type": "text", + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. 2024. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 585, + 462, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 585, + 462, + 597 + ], + "spans": [ + { + "bbox": [ + 304, + 585, + 462, + 597 + ], + "type": "text", + "content": "OpenAI. 2023. GPT-4: technical work." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 604, + 525, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 604, + 525, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 604, + 525, + 625 + ], + "type": "text", + "content": "OpenAI. 2024. Introducing openai o1. https://openai.com/o1/. Accessed: 2024-10-02." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 634, + 525, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 634, + 525, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 634, + 525, + 711 + ], + "type": "text", + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "type": "text", + "content": "Maja Popovic. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392–395, Lisbon, Portugal. Association for Computational Linguistics." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 303, + 802 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 303, + 802 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 303, + 802 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "type": "text", + "content": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Brussels, Belgium. Association for Computational Linguistics." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 138, + 289, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 138, + 289, + 183 + ], + "spans": [ + { + "bbox": [ + 69, + 138, + 289, + 183 + ], + "type": "text", + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 2024. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 192, + 289, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 192, + 289, + 248 + ], + "spans": [ + { + "bbox": [ + 69, + 192, + 289, + 248 + ], + "type": "text", + "content": "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. Comet: A neural framework for mt evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2685-2702." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 258, + 289, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 258, + 289, + 336 + ], + "spans": [ + { + "bbox": [ + 69, + 258, + 289, + 336 + ], + "type": "text", + "content": "Ricardo Rei, Marcos Treviso, Nuno M Guerreiro, Chrysoula Zerva, Ana C Farinha, Christine Maroti, José GC De Souza, Taisiya Glushkova, Duarte Alves, Luísca Coheur, et al. 2022. Cometkiwi: Ist-unbabel 2022 submission for the quality estimation shared task. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 634-645." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 346, + 289, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 346, + 289, + 390 + ], + "spans": [ + { + "bbox": [ + 69, + 346, + 289, + 390 + ], + "type": "text", + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 401, + 289, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 401, + 289, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 401, + 289, + 456 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 466, + 289, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 466, + 289, + 576 + ], + "spans": [ + { + "bbox": [ + 69, + 466, + 289, + 576 + ], + "type": "text", + "content": "David Silver, Aja Huang, Chris J. Maddison, Arthur Guez, L. Sifre, George van den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Vedavyas Panneershelvam, Marc Lanctot, Sander Dieleman, Dominik Grewe, John Nham, Nal Kalchbrenner, Ilya Sutskever, Timothy P. Lillicrap, Madeleine Leach, Koray Kavukcuoglu, Thore Graepel, and Demis Hassabis. 2016. Mastering the game of go with deep neural networks and tree search. Nature, 529:484-489." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 587, + 289, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 587, + 289, + 631 + ], + "spans": [ + { + "bbox": [ + 69, + 587, + 289, + 631 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters arXiv preprint arXiv:2408.03314." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 642, + 289, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 642, + 289, + 708 + ], + "spans": [ + { + "bbox": [ + 69, + 642, + 289, + 708 + ], + "type": "text", + "content": "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 718, + 289, + 741 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 718, + 289, + 741 + ], + "spans": [ + { + "bbox": [ + 69, + 718, + 289, + 741 + ], + "type": "text", + "content": "Kimi Team. 
2025a. Kimi k1.5: Scaling reinforcement learning with llms." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 751, + 289, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 289, + 774 + ], + "type": "text", + "content": "Qwen Team. 2025b. Qwq-32b: Embracing the power of reinforcement learning." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 774 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "text", + "content": "Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. 2024a. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 128, + 525, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 128, + 525, + 195 + ], + "spans": [ + { + "bbox": [ + 304, + 128, + 525, + 195 + ], + "type": "text", + "content": "Yutong Wang, Jiali Zeng, Xuebo Liu, Fandong Meng, Jie Zhou, and Min Zhang. 2024b. Taste: Teaching large language models to translate through self-reflection. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6144-6158." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 206, + 525, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 206, + 525, + 261 + ], + "spans": [ + { + "bbox": [ + 304, + 206, + 525, + 261 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 272, + 525, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 272, + 525, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 272, + 525, + 338 + ], + "type": "text", + "content": "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Balak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. 2025. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 349, + 525, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 349, + 525, + 406 + ], + "spans": [ + { + "bbox": [ + 304, + 349, + 525, + 406 + ], + "type": "text", + "content": "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 417, + 525, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 417, + 525, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 417, + 525, + 472 + ], + "type": "text", + "content": "Haoran Xu, Young Jin Kim, Amr Sharaf, and Hany Hassan Awadalla. 2023. 
A paradigm shift in machine translation: Boosting translation performance of large language models. arXiv preprint arXiv:2309.11674." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 484, + 525, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 525, + 539 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 525, + 539 + ], + "type": "text", + "content": "Haoran Xu, Kenton Murray, Philipp Koehn, Hieu Hoang, Akiko Eriguchi, and Huda Khayrallah. 2024. X-alma: Plug & play modules and adaptive rejection for quality translation at scale. arXiv preprint arXiv:2410.03115." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 550, + 525, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 550, + 525, + 596 + ], + "spans": [ + { + "bbox": [ + 304, + 550, + 525, + 596 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 607, + 525, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 607, + 525, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 607, + 525, + 650 + ], + "type": "text", + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 662, + 525, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 662, + 525, + 718 + ], + "spans": [ + { + "bbox": [ + 304, + 662, + 525, + 718 + ], + "type": "text", + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An opensource llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 729, + 525, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 729, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 729, + 525, + 774 + ], + "type": "text", + "content": "Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. 2024. Free process rewards without process labels. arXiv preprint arXiv:2412.01981." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 291, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 291, + 138 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 291, + 138 + ], + "type": "text", + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 146, + 291, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 146, + 291, + 202 + ], + "spans": [ + { + "bbox": [ + 69, + 146, + 291, + 202 + ], + "type": "text", + "content": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 2024. Marco-o1: Towards open reasoning models for open-ended solutions. Preprint, arXiv:2411.14405." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 210, + 291, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 210, + 291, + 265 + ], + "spans": [ + { + "bbox": [ + 69, + 210, + 291, + 265 + ], + "type": "text", + "content": "Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. Llamafactory: Unified efficient finetuning of " + }, + { + "bbox": [ + 69, + 210, + 291, + 265 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 69, + 210, + 291, + 265 + ], + "type": "text", + "content": " language models. arXiv preprint arXiv:2403.13372." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 276, + 186, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 276, + 186, + 289 + ], + "spans": [ + { + "bbox": [ + 68, + 276, + 186, + 289 + ], + "type": "text", + "content": "A Evaluation Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 298, + 290, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 298, + 290, + 421 + ], + "spans": [ + { + "bbox": [ + 67, + 298, + 290, + 421 + ], + "type": "text", + "content": "When evaluating model performance on the test set, we deployed open-source models locally using frameworks like vLLM11 or HuggingFace12 implementations. We use a sampling decoding strategy with a temperature of 0.2 and top_p set to 0.95. The maximum generation length was capped at 1024 tokens. We adopt the prompt shown in Table 5 to sample the translation (applying the specific chat template when needed)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 429, + 198, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 429, + 198, + 444 + ], + "spans": [ + { + "bbox": [ + 68, + 429, + 198, + 444 + ], + "type": "text", + "content": "B SFT Training Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 451, + 291, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 451, + 291, + 614 + ], + "spans": [ + { + "bbox": [ + 67, + 451, + 291, + 614 + ], + "type": "text", + "content": "For the Supervised Fine-Tuning (SFT) baseline compared in the ablation study (Section 6.2), we utilized LLaMA-Factory (Zheng et al., 2024). The SFT process started from the same base model architecture as the corresponding RL experiments (e.g., Qwen2.5-7B) and was performed on the identical parallel translation dataset (13,130 examples from WMT 2017-2020 after filtering, detailed in Section 4.1). The model was fine-tuned on 8 NVIDIA H800 80G GPUs for 2 epochs using a learning rate of 5e-6 and a batch size of 64, totaling approximately 400 training steps." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 305, + 111, + 522, + 186 + ], + "blocks": [ + { + "bbox": [ + 305, + 111, + 522, + 186 + ], + "lines": [ + { + "bbox": [ + 305, + 111, + 522, + 186 + ], + "spans": [ + { + "bbox": [ + 305, + 111, + 522, + 186 + ], + "type": "table", + "html": "
Inference Prompt
Translate the following text from {src_language} into {tgt_language}. {src_language}:{src_text} {tgt_language}:
", + "image_path": "034ce8e5accf4bb3654e49f621aeaacb12a6b6db4cfd17362d73b45299050156.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 305, + 318, + 523, + 437 + ], + "blocks": [ + { + "bbox": [ + 302, + 195, + 525, + 231 + ], + "lines": [ + { + "bbox": [ + 302, + 195, + 525, + 231 + ], + "spans": [ + { + "bbox": [ + 302, + 195, + 525, + 231 + ], + "type": "text", + "content": "Table 5: Prompt used for translation generation. {tgt_language} : target language; {src_language}: source language; {src_text}: the source test sentence." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 305, + 318, + 523, + 437 + ], + "lines": [ + { + "bbox": [ + 305, + 318, + 523, + 437 + ], + "spans": [ + { + "bbox": [ + 305, + 318, + 523, + 437 + ], + "type": "table", + "html": "
<tr><td rowspan="2">MODEL</td><td colspan="4">OUT-OF-DISTRIBUTION</td></tr>
<tr><td>EN-JA</td><td>DE-EN (Doc)</td><td>DE-ZH</td><td>Avg.</td></tr>
<tr><td colspan="5">Strong Baseline</td></tr>
<tr><td>Qwen2.5-72B-Instruct</td><td>73.25</td><td>69.13</td><td>69.89</td><td>70.76</td></tr>
<tr><td>LLaMA3.1-70B-Instruct</td><td>71.84</td><td>69.28</td><td>68.67</td><td>69.93</td></tr>
<tr><td colspan="5">Same-size Baseline</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>64.79</td><td>67.20</td><td>67.82</td><td>66.60</td></tr>
<tr><td>LLaMA-3.1-8B-Instruct</td><td>62.42</td><td>66.77</td><td>64.28</td><td>64.49</td></tr>
<tr><td>TowerInstruct-7B-v0.2</td><td>58.33</td><td>69.03</td><td>65.45</td><td>64.27</td></tr>
<tr><td>MT-R1-Zero-7B-Lex</td><td>63.33</td><td>66.17</td><td>64.32</td><td>64.61</td></tr>
<tr><td>MT-R1-Zero-7B-Sem</td><td>72.00</td><td>68.41</td><td>71.51</td><td>70.64</td></tr>
<tr><td>MT-R1-Zero-7B-Mix</td><td>69.27</td><td>68.74</td><td>68.74</td><td>68.92</td></tr>
", + "image_path": "e51b8bbce116110722fb78a02080c7e65321d8200b84ae803a6baa624079eac9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 305, + 568, + 523, + 687 + ], + "blocks": [ + { + "bbox": [ + 302, + 444, + 525, + 481 + ], + "lines": [ + { + "bbox": [ + 302, + 444, + 525, + 481 + ], + "spans": [ + { + "bbox": [ + 302, + 444, + 525, + 481 + ], + "type": "text", + "content": "Table 6: Out-of-distribution performance comparison using the COMETKiwi metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 305, + 568, + 523, + 687 + ], + "lines": [ + { + "bbox": [ + 305, + 568, + 523, + 687 + ], + "spans": [ + { + "bbox": [ + 305, + 568, + 523, + 687 + ], + "type": "table", + "html": "
<tr><td rowspan="2">MODEL</td><td colspan="4">OUT-OF-DISTRIBUTION</td></tr>
<tr><td>EN-JA</td><td>DE-EN (Doc)</td><td>DE-ZH</td><td>Avg.</td></tr>
<tr><td colspan="5">Strong Baseline</td></tr>
<tr><td>Qwen2.5-72B-Instruct</td><td>25.02</td><td>45.54</td><td>40.83</td><td>37.13</td></tr>
<tr><td>LLaMA3.1-70B-Instruct</td><td>24.64</td><td>45.98</td><td>37.85</td><td>36.16</td></tr>
<tr><td colspan="5">Same-size Baseline</td></tr>
<tr><td>Qwen2.5-7B-Instruct</td><td>18.91</td><td>41.17</td><td>35.25</td><td>31.78</td></tr>
<tr><td>LLaMA-3.1-8B-Instruct</td><td>16.22</td><td>40.28</td><td>31.08</td><td>29.19</td></tr>
<tr><td>TowerInstruct-7B-v0.2</td><td>10.52</td><td>43.40</td><td>34.74</td><td>29.55</td></tr>
<tr><td>MT-R1-Zero-7B-Lex</td><td>14.94</td><td>40.01</td><td>37.00</td><td>30.65</td></tr>
<tr><td>MT-R1-Zero-7B-Sem</td><td>14.12</td><td>33.19</td><td>22.83</td><td>23.38</td></tr>
<tr><td>MT-R1-Zero-7B-Mix</td><td>20.27</td><td>43.17</td><td>21.41</td><td>28.28</td></tr>
", + "image_path": "e06054696095bd5b5b33af33f93854ce771daa3bb99d030ff2d6e11f1ddb8da5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 694, + 524, + 730 + ], + "lines": [ + { + "bbox": [ + 302, + 694, + 524, + 730 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 524, + 730 + ], + "type": "text", + "content": "Table 7: Out-of-distribution performance comparison using the BLEU metric on EN-JA, DE-EN (Doc), and DE-ZH. (Complements Table 2)." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 740, + 236, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 740, + 236, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 740, + 236, + 775 + ], + "type": "inline_equation", + "content": "^{11}" + }, + { + "bbox": [ + 67, + 740, + 236, + 775 + ], + "type": "text", + "content": "https://github.com/vllm-project/vllm \n" + }, + { + "bbox": [ + 67, + 740, + 236, + 775 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 67, + 740, + 236, + 775 + ], + "type": "text", + "content": "https://huggingface.co/docs/transformers/main_classeses/text_generation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 303, + 803 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 95, + 526, + 157 + ], + "blocks": [ + { + "bbox": [ + 68, + 95, + 526, + 157 + ], + "lines": [ + { + "bbox": [ + 68, + 95, + 526, + 157 + ], + "spans": [ + { + "bbox": [ + 68, + 95, + 526, + 157 + ], + "type": "table", + "html": "
<tr><td></td><td colspan="2">Train</td><td colspan="5">Test</td></tr>
<tr><td></td><td>EN-ZH</td><td>ZH-EN</td><td>EN-ZH</td><td>ZH-EN</td><td>EN-JA</td><td>DE-EN</td><td>DE-ZH</td></tr>
<tr><td># of cases</td><td>6565</td><td>6565</td><td>997</td><td>1976</td><td>997</td><td>549</td><td>1012</td></tr>
<tr><td>Source</td><td colspan="2">WMT 17-20</td><td>WMT 24</td><td>WMT 23</td><td>WMT 24</td><td>WMT 23</td><td>Flores</td></tr>
", + "image_path": "0f9aa36facffeb85c2cb9ac5dc34c7b7c25e508328d27a804f4f1b9adf6f3c9f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 76, + 235, + 518, + 405 + ], + "blocks": [ + { + "bbox": [ + 108, + 163, + 485, + 176 + ], + "lines": [ + { + "bbox": [ + 108, + 163, + 485, + 176 + ], + "spans": [ + { + "bbox": [ + 108, + 163, + 485, + 176 + ], + "type": "text", + "content": "Table 8: Data statistics for the training and test sets used in the main experiments (EN " + }, + { + "bbox": [ + 108, + 163, + 485, + 176 + ], + "type": "inline_equation", + "content": "\\rightleftharpoons" + }, + { + "bbox": [ + 108, + 163, + 485, + 176 + ], + "type": "text", + "content": " ZH)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 76, + 235, + 518, + 405 + ], + "lines": [ + { + "bbox": [ + 76, + 235, + 518, + 405 + ], + "spans": [ + { + "bbox": [ + 76, + 235, + 518, + 405 + ], + "type": "table", + "html": "
<tr><td></td><td colspan="4">Parallel Data</td><td></td></tr>
<tr><td></td><td>Train (from EN)</td><td>Train (to EN)</td><td>Test (from EN)</td><td>Test (to EN)</td><td>Resource</td></tr>
<tr><td>Afrikaans (AF)</td><td>2994</td><td>341</td><td>1012</td><td>1012</td><td>Mid</td></tr>
<tr><td>Danish (DA)</td><td>2994</td><td>355</td><td>1012</td><td>1012</td><td>Mid</td></tr>
<tr><td>Dutch (NL)</td><td>2994</td><td>403</td><td>1012</td><td>1012</td><td>High</td></tr>
<tr><td>German (DE)</td><td>7015</td><td>885</td><td>1012</td><td>1012</td><td>High</td></tr>
<tr><td>Icelandic (IS)</td><td>4994</td><td>678</td><td>1012</td><td>1012</td><td>Low</td></tr>
<tr><td>Norwegian (NO)</td><td>2994</td><td>360</td><td>1012</td><td>1012</td><td>Low</td></tr>
<tr><td>Swedish (SV)</td><td>2994</td><td>339</td><td>1012</td><td>1012</td><td>High</td></tr>
<tr><td>Chinese (ZH)</td><td>6906</td><td>874</td><td>1012</td><td>1012</td><td>High</td></tr>
<tr><td>English (EN)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
", + "image_path": "9feac2b9c959db1f4447139b0a49da8626efb382b3b1ce588da0a31cc3132242.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 413, + 525, + 439 + ], + "lines": [ + { + "bbox": [ + 67, + 413, + 525, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 413, + 525, + 439 + ], + "type": "text", + "content": "Table 9: Parallel data statistics for languages used in multilingual experiments (Section 6.3), detailing training/test pairs and resource level classification." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 76, + 498, + 185, + 608 + ], + "blocks": [ + { + "bbox": [ + 76, + 498, + 185, + 608 + ], + "lines": [ + { + "bbox": [ + 76, + 498, + 185, + 608 + ], + "spans": [ + { + "bbox": [ + 76, + 498, + 185, + 608 + ], + "type": "image", + "image_path": "12359a347db12f6a28ac40ce29f250c9510c6ae5d91478e5f66234d84e04091a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 189, + 498, + 295, + 608 + ], + "blocks": [ + { + "bbox": [ + 189, + 498, + 295, + 608 + ], + "lines": [ + { + "bbox": [ + 189, + 498, + 295, + 608 + ], + "spans": [ + { + "bbox": [ + 189, + 498, + 295, + 608 + ], + "type": "image", + "image_path": "b02385982b62b62aeec9e46e8b146c1ef6e44c85a8a6b24822ba3480eea684cc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 296, + 498, + 406, + 608 + ], + "blocks": [ + { + "bbox": [ + 296, + 498, + 406, + 608 + ], + "lines": [ + { + "bbox": [ + 296, + 498, + 406, + 608 + ], + "spans": [ + { + "bbox": [ + 296, + 498, + 406, + 608 + ], + "type": "image", + "image_path": "f05ed6fe4584e687cbc0e0673a93c3498336b87980333857019826514498ecf5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 409, + 498, + 518, + 608 + ], + "blocks": [ + { + "bbox": [ + 409, + 498, + 518, + 608 + ], + "lines": [ + { + "bbox": [ + 409, + 498, + 518, + 608 + ], + "spans": [ + { + "bbox": [ + 409, + 498, + 518, + 608 + ], + "type": "image", + "image_path": "b5e0d24285ec67b56aa28877c3937735f3d15f64b819d2c5d0e8cf1401382559.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 74, + 609, + 185, + 719 + ], + "blocks": [ + { + "bbox": [ + 74, + 609, + 185, + 719 + ], + "lines": [ + { + "bbox": [ + 74, + 609, + 185, + 719 + ], + "spans": [ + { + "bbox": [ + 74, + 609, + 185, + 719 + ], + "type": "image", + "image_path": "efb78cebd39abe04cdbd765eca2155529d96f3ffc485a6d2f61c9aa3ba7dbc1c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 729, + 525, + 753 + ], + "lines": [ + { + "bbox": [ + 67, + 729, + 525, + 753 + ], + "spans": [ + { + "bbox": [ + 67, + 729, + 525, + 753 + ], + "type": "text", + "content": "Figure 11: Training progression (reference-free XCOMET score) for multilingual MT-R1-Zero models based on LLaMA-3.1-8B and Qwen2.5-7B across multiple EN-XX test sets (Section 6.3)." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 189, + 609, + 295, + 719 + ], + "blocks": [ + { + "bbox": [ + 189, + 609, + 295, + 719 + ], + "lines": [ + { + "bbox": [ + 189, + 609, + 295, + 719 + ], + "spans": [ + { + "bbox": [ + 189, + 609, + 295, + 719 + ], + "type": "image", + "image_path": "9c99c3b95f99c45ba1bf2edfb43f2ce1b3e1fd63b6bcccf788a94d00ce921146.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 299, + 609, + 406, + 719 + ], + "blocks": [ + { + "bbox": [ + 299, + 609, + 406, + 719 + ], + "lines": [ + { + "bbox": [ + 299, + 609, + 406, + 719 + ], + "spans": [ + { + "bbox": [ + 299, + 609, + 406, + 719 + ], + "type": "image", + "image_path": "1b62dee80d10c919aac3871669ef0f76618e6c309f1c3e65bd72a56a1dc9961a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 409, + 609, + 518, + 719 + ], + "blocks": [ + { + "bbox": [ + 409, + 609, + 518, + 719 + ], + "lines": [ + { + "bbox": [ + 409, + 609, + 518, + 719 + ], + "spans": [ + { + "bbox": [ + 409, + 609, + 518, + 719 + ], + "type": "image", + "image_path": "0083342aef6e593edf76e71c3ab654f7e9cf27116a34b0d643937e75add9d402.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 792, + 304, + 803 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 97, + 205, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 97, + 205, + 112 + ], + "spans": [ + { + "bbox": [ + 79, + 97, + 205, + 112 + ], + "type": "text", + "content": "User:\"It's not that bad, right, Kayel?\"" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 79, + 117, + 142, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 117, + 142, + 125 + ], + "spans": [ + { + "bbox": [ + 79, + 117, + 142, + 125 + ], + "type": "text", + "content": "Assistant: " + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 126, + 203, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 126, + 203, + 229 + ], + "spans": [ + { + "bbox": [ + 79, + 126, + 203, + 229 + ], + "type": "text", + "content": "When approaching the task of translating the given English text into Japanese, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original while ensuring it reads smoothly and naturally in Japanese. I will pay particular attention to preserving the conversational nature of the exchange and the use of question tags (\"right, Kayel?\"), as well as the balance between the two characters' statements." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 235, + 193, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 235, + 193, + 248 + ], + "spans": [ + { + "bbox": [ + 79, + 235, + 193, + 248 + ], + "type": "text", + "content": "In this case, I will consider the following aspects:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 255, + 199, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 255, + 199, + 290 + ], + "spans": [ + { + "bbox": [ + 79, + 255, + 199, + 290 + ], + "type": "text", + "content": "1. Maintaining the conversational tone: This involves using appropriate phrasing, word order, and intonation to convey the casual nature of the conversation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 290, + 203, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 290, + 203, + 332 + ], + "spans": [ + { + "bbox": [ + 79, + 290, + 203, + 332 + ], + "type": "text", + "content": "2. Translating question tags: Japanese question tags are typically formed differently from English, so I will find an equivalent structure that maintains the flow of the conversation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 332, + 196, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 332, + 196, + 380 + ], + "spans": [ + { + "bbox": [ + 79, + 332, + 196, + 380 + ], + "type": "text", + "content": "3. Preserving the balance between the two characters' statements: I will ensure that the translation reflects the equal weight given to both Kayel's response and the speaker's initial statement in the original text." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 380, + 203, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 380, + 203, + 436 + ], + "spans": [ + { + "bbox": [ + 79, + 380, + 203, + 436 + ], + "type": "text", + "content": "4. Adapting to Japanese sentence structure: Japanese often places the verb at the end of a sentence, so I will adjust the word order accordingly while maintaining the overall meaning and rhythm of the conversation. 
" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 436, + 120, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 436, + 120, + 441 + ], + "spans": [ + { + "bbox": [ + 79, + 436, + 120, + 441 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 79, + 441, + 167, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 441, + 167, + 447 + ], + "spans": [ + { + "bbox": [ + 79, + 441, + 167, + 447 + ], + "type": "text", + "content": "“大丈夫!也存在心。久己工儿?”" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 79, + 447, + 121, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 447, + 121, + 454 + ], + "spans": [ + { + "bbox": [ + 79, + 447, + 121, + 454 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 79, + 459, + 155, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 459, + 155, + 466 + ], + "spans": [ + { + "bbox": [ + 79, + 459, + 155, + 466 + ], + "type": "text", + "content": "User:好看,但有点点小遗憾" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 79, + 466, + 142, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 466, + 142, + 472 + ], + "spans": [ + { + "bbox": [ + 79, + 466, + 142, + 472 + ], + "type": "text", + "content": "Assistant: " + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 79, + 473, + 208, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 473, + 208, + 562 + ], + "spans": [ + { + "bbox": [ + 79, + 473, + 208, + 562 + ], + "type": "text", + "content": "The task at hand involves translating a sentence of Chinese into English while taking into consideration several factors to ensure an effective and appropriate translation. The source text is: “好看,但有点小遗憾。” Given the context in which this sentence is being used, it is clear that the speaker is expressing their positive opinion of something they have just seen or experienced, but also acknowledging a minor flaw or disappointment that slightly" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 79, + 563, + 209, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 563, + 209, + 706 + ], + "spans": [ + { + "bbox": [ + 79, + 563, + 209, + 706 + ], + "type": "text", + "content": "diminishes their overall satisfaction. With this in mind, it is important to maintain the same level of nuance and complexity in the translation to effectively convey the same level of emotional and intellectual engagement expected in professional and polished English communication. After careful consideration of these factors and the specific requirements of the translation context, I have decided to render the source text as follows: \"It looks good, but there's a tiny bit of disappointment.\" This translation not only accurately conveys the same level of appreciation and critique as the source text but also does so in a professional and tactful manner that respects the sensitivity and complexity of the situation at hand." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 79, + 706, + 118, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 706, + 118, + 713 + ], + "spans": [ + { + "bbox": [ + 79, + 706, + 118, + 713 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 79, + 714, + 209, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 714, + 209, + 728 + ], + "spans": [ + { + "bbox": [ + 79, + 714, + 209, + 728 + ], + "type": "text", + "content": "\"It looks good, but there's a tiny bit of disappointment.\" " + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 214, + 97, + 363, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 97, + 363, + 118 + ], + "spans": [ + { + "bbox": [ + 214, + 97, + 363, + 118 + ], + "type": "text", + "content": "User:北交所融资融券交易的启动是持续推进北交所市场制度建设、完善交易机制、提升二级市场定价功能的重要举措。" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 214, + 125, + 277, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 125, + 277, + 132 + ], + "spans": [ + { + "bbox": [ + 214, + 125, + 277, + 132 + ], + "type": "text", + "content": "Assistant: " + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 214, + 132, + 361, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 132, + 361, + 215 + ], + "spans": [ + { + "bbox": [ + 214, + 132, + 361, + 215 + ], + "type": "text", + "content": "When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese. I will focus on preserving the balance between providing all relevant information and maintaining readability, while also considering the natural flow of the English language. Additionally, I will pay attention to the following aspects:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 214, + 221, + 357, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 221, + 357, + 262 + ], + "spans": [ + { + "bbox": [ + 214, + 221, + 357, + 262 + ], + "type": "text", + "content": "1. Structure: I will aim to maintain the overall structure of the Chinese text, including the use of paragraphs and sentence order, to ensure that the English translation remains coherent and easy to follow." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 214, + 263, + 361, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 263, + 361, + 303 + ], + "spans": [ + { + "bbox": [ + 214, + 263, + 361, + 303 + ], + "type": "text", + "content": "2. Tone and voice: I will strive to capture the tone and voice of the original text, which appears to be informative and objective, while also considering the appropriate level of formality for the target audience." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 214, + 304, + 361, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 304, + 361, + 345 + ], + "spans": [ + { + "bbox": [ + 214, + 304, + 361, + 345 + ], + "type": "text", + "content": "3. Terminology: I will use accurate and up-to-date terminology related to the stock exchange, financing, and trading mechanisms, ensuring that all terms are clear and easily understood by the English-speaking audience." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 214, + 345, + 361, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 345, + 361, + 392 + ], + "spans": [ + { + "bbox": [ + 214, + 345, + 361, + 392 + ], + "type": "text", + "content": "4. Cultural and linguistic nuances: I will pay attention to any cultural or linguistic nuances present in the Chinese text and find suitable ways to convey them in English, where possible, without compromising the clarity or readability of the translation." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 214, + 393, + 361, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 393, + 361, + 435 + ], + "spans": [ + { + "bbox": [ + 214, + 393, + 361, + 435 + ], + "type": "text", + "content": "5. Number and date formats: I will convert any numbers and date formats from the Chinese system to the corresponding English system, as necessary, to ensure that the translation is both accurate and locally appropriate." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 214, + 435, + 361, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 435, + 361, + 476 + ], + "spans": [ + { + "bbox": [ + 214, + 435, + 361, + 476 + ], + "type": "text", + "content": "6. Punctuation and formatting: I will use standard English punctuation and formatting conventions, while taking into account any specific requirements or preferences for the target audience or publication platform." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 214, + 482, + 358, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 482, + 358, + 517 + ], + "spans": [ + { + "bbox": [ + 214, + 482, + 358, + 517 + ], + "type": "text", + "content": "By considering these aspects, I aim to produce a high-quality English translation that effectively communicates the content and intent of the original Chinese text. 
" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 214, + 518, + 253, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 518, + 253, + 523 + ], + "spans": [ + { + "bbox": [ + 214, + 518, + 253, + 523 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 214, + 523, + 361, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 523, + 361, + 572 + ], + "spans": [ + { + "bbox": [ + 214, + 523, + 361, + 572 + ], + "type": "text", + "content": "\"The launch of financing and margin trading on the Beijing Stock Exchange is an important step in advancing the development of the exchange's market systems, improving trading mechanisms, and enhancing the secondary market's pricing capabilities.\" " + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 214, + 577, + 346, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 577, + 346, + 585 + ], + "spans": [ + { + "bbox": [ + 214, + 577, + 346, + 585 + ], + "type": "text", + "content": "User:加拿大也报告了类似的结果,只是数字较低。" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 214, + 591, + 277, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 591, + 277, + 597 + ], + "spans": [ + { + "bbox": [ + 214, + 591, + 277, + 597 + ], + "type": "text", + "content": "Assistant: " + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 214, + 598, + 358, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 598, + 358, + 700 + ], + "spans": [ + { + "bbox": [ + 214, + 598, + 358, + 700 + ], + "type": "text", + "content": "When approaching the task of translating the given Chinese text into English, my primary goal is to create a faithful and engaging rendition that maintains the structure, tone, and level of detail of the original Chinese while ensuring it reads smoothly and naturally in English. I will focus on preserving the balance between the two sentences, maintaining the use of parallel structure where appropriate, and accurately conveying the nuances of the Chinese phrasing, including the use of numbers and the comparative nature of the information presented." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 214, + 700, + 254, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 700, + 254, + 707 + ], + "spans": [ + { + "bbox": [ + 214, + 700, + 254, + 707 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 214, + 708, + 343, + 729 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 708, + 343, + 729 + ], + "spans": [ + { + "bbox": [ + 214, + 708, + 343, + 729 + ], + "type": "text", + "content": "\"Canada also reported similar results, although the numbers were lower.\" " + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 367, + 97, + 513, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 97, + 513, + 118 + ], + "spans": [ + { + "bbox": [ + 367, + 97, + 513, + 118 + ], + "type": "text", + "content": "User: to divide the 412.5 meters long, 12.9 meters wide, with a total weight of 15,000 tons of continuous beams into 96 sections." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 367, + 125, + 507, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 125, + 507, + 166 + ], + "spans": [ + { + "bbox": [ + 367, + 125, + 507, + 166 + ], + "type": "text", + "content": "Assistant: To translate the given English sentence into Chinese, I would follow these steps to ensure accuracy, fidelity to the original text, and naturalness in the target language (Chinese):" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 367, + 173, + 513, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 173, + 513, + 228 + ], + "spans": [ + { + "bbox": [ + 367, + 173, + 513, + 228 + ], + "type": "text", + "content": "1. Understand the context and meaning of the original text: I would first read and re-read the sentence to grasp its full meaning, paying close attention to technical terms, measurements, and the overall structure of the information being conveyed. This ensures a proper translation that maintains the original message." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 367, + 229, + 514, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 229, + 514, + 283 + ], + "spans": [ + { + "bbox": [ + 367, + 229, + 514, + 283 + ], + "type": "text", + "content": "2. Identify key elements and vocabulary: I would identify important numbers, measurements, and specific terms that need to be accurately translated, such as \"412.5 meters,\" \"12.9 meters,\" and \"15,000 tons.\" I would also note any technical terms related to engineering or construction, such as \"continuous beams\" and \"sections.\"" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 367, + 283, + 514, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 283, + 514, + 338 + ], + "spans": [ + { + "bbox": [ + 367, + 283, + 514, + 338 + ], + "type": "text", + "content": "3. Consider cultural and contextual nuances: While this particular sentence doesn't contain cultural references that would require adaptation, I would generally be aware of how to translate phrases or expressions that might have different connotations or usages in Chinese culture compared to English." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 367, + 338, + 490, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 338, + 490, + 345 + ], + "spans": [ + { + "bbox": [ + 367, + 338, + 490, + 345 + ], + "type": "text", + "content": "4. Choose an appropriate translation" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 367, + 345, + 511, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 345, + 511, + 400 + ], + "spans": [ + { + "bbox": [ + 367, + 345, + 511, + 400 + ], + "type": "text", + "content": "strategy: For this type of technical information, I would likely use a combination of literal translation and necessary modifications to fit Chinese syntax and readability. This might involve reordering words or phrases to sound more natural in Chinese while still maintaining the original meaning." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 367, + 401, + 483, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 401, + 483, + 407 + ], + "spans": [ + { + "bbox": [ + 367, + 401, + 483, + 407 + ], + "type": "text", + "content": "5. 
Translate while considering the" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 367, + 407, + 513, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 407, + 513, + 454 + ], + "spans": [ + { + "bbox": [ + 367, + 407, + 513, + 454 + ], + "type": "text", + "content": "audience: Since this translation is presumably for a Chinese-speaking audience, I would ensure that the translation aligns with how Chinese speakers might express similar ideas, using idiomatic expressions and terminology that would be familiar to them." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 367, + 455, + 514, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 455, + 514, + 503 + ], + "spans": [ + { + "bbox": [ + 367, + 455, + 514, + 503 + ], + "type": "text", + "content": "6. Account for register and tone: Depending on the intended use of the translation (e.g., technical manual, news report, etc.), I would adjust the formality and wording to match the appropriate register. For a technical report, the language would be more formal and precise." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 367, + 504, + 514, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 504, + 514, + 558 + ], + "spans": [ + { + "bbox": [ + 367, + 504, + 514, + 558 + ], + "type": "text", + "content": "7. Translate numbers and measurements: I would carefully translate the numbers and measurements given (412.5 meters, 12.9 meters, 15,000 tons) to their equivalent in Chinese, ensuring that the units of measurement are accurately reflected in the translation (e.g., using \"米\" for meters and \"吨\" for tons)." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 367, + 559, + 514, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 559, + 514, + 613 + ], + "spans": [ + { + "bbox": [ + 367, + 559, + 514, + 613 + ], + "type": "text", + "content": "8. Ensure coherence and flow in the translation: After translating each part of the sentence, I would review and refine the translation to ensure that it flows naturally in Chinese, making any necessary adjustments to grammar, syntax, and phrasing to create a smooth and coherent translation." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 367, + 614, + 492, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 614, + 492, + 621 + ], + "spans": [ + { + "bbox": [ + 367, + 614, + 492, + 621 + ], + "type": "text", + "content": "9. Review and make final adjustments:" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 367, + 622, + 512, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 622, + 512, + 676 + ], + "spans": [ + { + "bbox": [ + 367, + 622, + 512, + 676 + ], + "type": "text", + "content": "Finally, I would review the entire translated sentence for accuracy, fluency, and naturalness in Chinese. I would make any necessary adjustments to improve the translation, checking again against the original text to ensure that all information is preserved and that there are no errors or misunderstandings." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 367, + 682, + 507, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 682, + 507, + 709 + ], + "spans": [ + { + "bbox": [ + 367, + 682, + 507, + 709 + ], + "type": "text", + "content": "By following these steps, I can provide a translation that is accurate, faithful to the original text, and reads naturally in Chinese." 
+ } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 367, + 717, + 504, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 717, + 504, + 724 + ], + "spans": [ + { + "bbox": [ + 367, + 717, + 504, + 724 + ], + "type": "text", + "content": "将总长412.5米、宽12.9米" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 367, + 724, + 495, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 724, + 495, + 730 + ], + "spans": [ + { + "bbox": [ + 367, + 724, + 495, + 730 + ], + "type": "text", + "content": "总重15000吨的连续梁分成96节。" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 67, + 743, + 524, + 755 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 743, + 524, + 755 + ], + "spans": [ + { + "bbox": [ + 67, + 743, + 524, + 755 + ], + "type": "text", + "content": "Figure 12: Qualitative examples (I-V) showcasing the diverse thinking patterns generated by MT-R1-Zero models." + } + ] + } + ], + "index": 51, + "type": "text" + }, + { + "bbox": [ + 496, + 157, + 510, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 496, + 157, + 510, + 169 + ], + "spans": [ + { + "bbox": [ + 496, + 157, + 510, + 169 + ], + "type": "text", + "content": "Ⅲ" + } + ] + } + ], + "index": 53 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "spans": [ + { + "bbox": [ + 291, + 793, + 302, + 803 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 52 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_content_list.json b/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c92cb9b9d8d800876dab76fc53d424803128f6df --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_content_list.json @@ -0,0 +1,1886 @@ +[ + { + "type": "text", + "text": "Heimdall: test-time scaling on the generative verification", + "text_level": 1, + "bbox": [ + 111, + 127, + 885, + 155 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wenlei Shi, Xing Jin", + "bbox": [ + 398, + 193, + 596, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ByteDance Seed", + "bbox": [ + 429, + 223, + 568, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 452, + 292, + 545, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "An AI system can create and maintain knowledge only to the extent that it can verify that knowledge itself [23]. Recent work on long Chain-of-Thought reasoning has demonstrated great potential of LLMs on solving competitive problems, but their verification ability remains to be weak and not sufficiently investigated. In this paper, we propose Heimdall, the long CoT verification LLM that can accurately judge the correctness of solutions. With pure reinforcement learning, we boost the verification accuracy from $62.5\\%$ to $94.5\\%$ on competitive math problems. By scaling with repeated sampling, the accuracy further increases to $97.5\\%$ . 
Through human evaluation, Heimdall demonstrates impressive generalization capabilities, successfully detecting most issues in challenging math proofs, the type of which is not included during training. Furthermore, we propose Pessimistic Verification to extend the functionality of Heimdall to scaling up the problem solving. It calls Heimdall to judge the solutions from a solver model and based on the pessimistic principle, selects the most likely correct solution with the least uncertainty. Taking DeepSeek-R1-Distill-Qwen-32B as the solver model, Pessimistic Verification improves the solution accuracy on AIME2025 from $54.2\\%$ to $70.0\\%$ with $16\\times$ compute budget and to $83.3\\%$ with more compute budget. With the stronger solver Gemini 2.5 Pro, the score reaches $93.0\\%$ . Finally, we prototype an automatic knowledge discovery system, a ternary system where one poses questions, another provides solutions, and the third verifies the solutions. Using the data synthesis work NuminaMath [13] for the first two components, Heimdall effectively identifies problematic records within the dataset and reveals that nearly half of the data is flawed, which interestingly aligns with the recent ablation studies from NuminaMath.", + "bbox": [ + 148, + 321, + 846, + 623 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Date: April 17, 2025", + "bbox": [ + 150, + 633, + 290, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Correspondence: Wenlei Shi at wenlei.shi@bytedance.com, Xing Jin at jinxing.9@bytedance.com", + "bbox": [ + 150, + 648, + 818, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 109, + 723, + 264, + 739 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the realm of scientific and mathematical discovery, the process of logistic verification and validation is as crucial as the initial act of problem-solving. One of the most illustrative examples of this principle can be found in the famous thought experiment 'chasing a beam of light' by Albert Einstein, where he found the paradox within the established physics theories and further formulated the principle of the constancy of the speed of light, a cornerstone of his Special Theory of Relativity. Recently, the problem solving ability of LLMs have been significantly improved. With the long Chain of Thought(CoT) reasoning, advanced LLMs are now able to effectively solve complex competition-level problems in both math and code domains. However, the verification ability of LLMs has not been sufficiently investigated. On one hand, although the intelligence of general purported LLM increases rapidly with the long CoT capabilities, we find that current SOTA models with direct prompting [5, 21] are not good at verifications on complex problems, e.g., o1-mini only achieves", + "bbox": [ + 107, + 753, + 887, + 905 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "ByteDance | Seed", + "bbox": [ + 109, + 63, + 364, + 87 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10337v2 [cs.AI] 16 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0191c427ea854db6b0b3df2857a2ba0d5b3133bacabb267a56124a118ed19753.jpg", + "image_caption": [ + "Figure 1 Scaling of Heimdall. 
Left: the verification accuracy scales with the response length during RL training. With more reasoning tokens, Heimdall gives more accurate judgment on the solutions on AIME2024. Middle: the verification accuracy scales with repeated sampling and Majority Voting. By sampling multiple verification trajectories and voting, the accuracy can be further improved. Right: with Heimdall scoring the solutions on AIME2025, the problem solving accuracy scales with the number of solutions. We verify 16 times on each solution and select the most likely correct one with Pessimistic Verification $(\\times 16)$ . When inter-playing with various solver models, Heimdall gives significant improvements over pure solver-based Majority Voting(MV)." + ], + "image_footnote": [], + "bbox": [ + 114, + 125, + 367, + 308 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/85f80f96d11583bcc91e763e2173e36cf0836b619f69f65fa73aa939cc41a9b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 125, + 625, + 309 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/50ba8d1f5ebc6b794706a38186df025e695a0e378c116cc3e91b5a1a9fbd269e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 125, + 880, + 308 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "80.9% on our evaluation dataset. On the other hand, some work [16, 17, 22, 25, 29, 32, 35] trains a dedicated model for verification or critique but the high-quality verification data is hard to collect, which limits the verification capability and hence impedes the application to challenging problems.", + "bbox": [ + 109, + 446, + 883, + 492 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we claim that verifying if a solution is correct is a special type of problem solving, i.e., a true/false question and involves step-by-step judgment on the solution. Inspired by the recent progress on the long CoT reasoning, we propose to train a long CoT verifier through reinforcement learning. We name it Heimdall, symbolizing its sharp ability to detect errors and safeguards the correctness of knowledge. We leverage PPO [20] algorithm and find that the data processing is critical to the RL training. Specifically, two types of problems hinder the optimization, i.e., easy problems with only correct solutions and hard problems with only wrong solutions, both of which lack contrastive examples and tends to guide the verifier to simply identify the hardness of a problem, rather than finding the wrong position in the solution. By filtering out the two cases, the model learns the verification ability more effectively. Taking the competitive math problems as our primary experimental domain, we show that the verification ability follows the test time scaling law where the accuracy improves significantly from $62.5\\%$ to $94.5\\%$ as the response length grows, as is shown the left of Figure 1. The performance can be further improved by sampling multiple verifications and voting on the judgment results. In the middle of Figure 1, the accuracy grows from $94.5\\%$ to $97.5\\%$ on AIME2024 as the number of verifications grows from 2 to 64. 
Furthermore, the evaluation from human experts shows that Heimdall generalizes well on math proof problems although it is trained with only the calculation problems with explicit answers.", + "bbox": [ + 109, + 500, + 883, + 742 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In addition, we extend the usage of Heimdall to scale up the problem solving. Suppose the solver model gives multiple solutions for a problem and Heimdall judges the correctness of each solution for multiple times. We can select the best solution based on the verification results. We frame the selection process as a multi-arm bandit problem where solutions with the same conclusion are treated as multiple visits to the same 'arm'. Based on the pessimism principle, we propose the solution selection algorithm called Pessimistic Verification that minimizes the uncertainty of selecting wrong solutions. The algorithm unifies Majority Voting and reward model based Best-of-N by balancing the contributions of the solver and the verifier, and empirically demonstrates better scaling over both algorithms. Taking DeepSeek-R1-Distill-Qwen-32B [6] as the solver model, which scores 54 on AIME2025, Pessimistic Verification raises the scores to 70 with $\\times 16$ compute, matching the performance of o1, and to 83.3 with more compute. We further test with stronger solver models, including DeepSeek-R1 [6] and Gemini 2.5 Pro[3]. As is shown in the right of Figure 1, Pessimistic Verification with Heimdall consistently improves the problem solving of various models and with Gemini 2.5 Pro, the", + "bbox": [ + 109, + 750, + 883, + 930 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 963, + 504, + 974 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "accuracy on AIME2025 reaches $93\\%$ , matching the currently reported SOTA with multiple attempts by Grok3 [4].", + "bbox": [ + 109, + 123, + 887, + 156 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, we create a prototype to demonstrate the utility of Heimdall on the automatic knowledge discovery. We use the work of math data synthesis called NuminaMath [12] as the procedure of automatically proposing new problems and the corresponding solutions, and call Heimdall to detect errors in the synthetic dataset. Human annotation demonstrates that Heimdall accurately identifies the errors in the dataset. The verification result also reveals that the quality of current synthetic dataset is poor, which is consistent with the authors' finding that removing the dataset from training improves the performance of the solver model [13].", + "bbox": [ + 109, + 161, + 887, + 253 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, our contributions are as follows:", + "bbox": [ + 109, + 260, + 439, + 273 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose Heimdall, the long CoT verifier by reinforcement learning and demonstrate the superior accuracy than top-tier LLMs. Heimdall also shows good generalization ability on out-of-domain problems, such as math proof problems.", + "- We propose a unified algorithm called Pessimistic Verification for inference time scaling on problem solving. 
Empirically, it scales better than the vanilla Majority Voting or the reward-model based Best-of-N and achieves SOTA accuracy on AIME2025.", + "- We create a prototype to show the utility of Heimdall in autonomous knowledge discovery, where Heimdall is used to identify the correctness of the problem-solution pairs synthesized by another LLM. Human evaluations show that Heimdall can effectively detect the flaws in the synthetic data." + ], + "bbox": [ + 133, + 282, + 883, + 431 + ], + "page_idx": 2 + },
+ { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 109, + 449, + 279, + 465 + ], + "page_idx": 2 + },
+ { + "type": "text", + "text": "Reasoning model. Reasoning models outperform previous general-purpose models on challenging reasoning tasks. During chain-of-thought (CoT) reasoning, they keep reflecting on their claims and searching for viable solutions, utilizing more compute budget and providing better and more robust results. OpenAI first released its reasoning models [2, 7, 30], which perform significantly better on competitive tests like AIME and CodeForces than its previous models. DeepSeek [6] and Kimi [24] independently propose different ways of reinforcement learning to trigger the reflection and searching capability in their base models. Recently, Grok3 [4] and Gemini 2.5 Pro [3] also demonstrate their impressive reasoning capabilities through long CoT.", + "bbox": [ + 109, + 479, + 887, + 585 + ], + "page_idx": 2 + },
+ { + "type": "text", + "text": "Generative evaluation. Recently, several works have focused on improving the verification ability of LLMs. Some [8, 29, 32, 34] explore finetuning an LLM with synthetic verification data to improve its verification ability. However, it is hard to synthesize high-quality data if the LLM inherently lacks verification skills. One related topic is LLM-as-a-Judge [5, 11], where an LLM is prompted to evaluate responses from other LLMs. The work [11] leverages strong LLMs as judges to evaluate other models in various domains and reveals that strong LLM judges have good generalization ability across domains. Some works design the judge system and analyze the judgment behavior [19, 21, 27]. However, prompting is only effective on easy tasks, and when it comes to competitive tests, general-purpose reasoning models do not perform well, as revealed by our test in Section 4. Another similar topic is critique [9, 10, 14, 15, 18], which often focuses on code and math problems and is used for giving suggestions for further revision. Critique fine-tuning [25] shows that fine-tuning on high-quality critique data is beneficial to the reasoning ability of a base model. Several works, e.g., CTRL [26], leverage RL to train LLMs. However, they do not leverage the long CoT ability, which limits the verification performance on complex reasoning problems.", + "bbox": [ + 109, + 592, + 887, + 790 + ], + "page_idx": 2 + },
+ { + "type": "text", + "text": "3 Approach", + "text_level": 1, + "bbox": [ + 109, + 804, + 243, + 821 + ], + "page_idx": 2 + },
+ { + "type": "text", + "text": "We define the verification task where we ask a model to judge if a solution to a problem is correct in its CoT and finally put its judgment result at the end of the response. 
Table 1 is the template of the verification prompt and the expected format of a response.", + "bbox": [ + 109, + 834, + 885, + 881 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 962, + 504, + 974 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the final answer in the solution is correct and 0 if incorrect.", + "bbox": [ + 127, + 135, + 866, + 181 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "**Problem**\n${problem}", + "guess_lang": "txt", + "bbox": [ + 127, + 195, + 225, + 226 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "**Solution**\n${solution}", + "guess_lang": "txt", + "bbox": [ + 129, + 239, + 223, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1 Prompt template for verification.", + "bbox": [ + 356, + 297, + 637, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Reinforcement learning for verification", + "text_level": 1, + "bbox": [ + 109, + 338, + 501, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "RL Setup. Let $\\mathcal{D} = \\{(p_i, s_i, y_i)\\}_{i=1}^N$ be our dataset, where $p_i$ is a problem, $s_i$ is a solution to the problem, which may be the response from a reasoning model and $y_i \\in \\{0, 1\\}$ represents the correctness of the solution, with 1 indicating correctness and 0 indicating incorrectness. Given a triplet $(p_i, s_i, y_i)$ , we prompt a LLM to check the correctness of the solution step-by-step and finally give a conclusion on the correctness, as is shown in Table 1. Denoting the prompt as $q_i$ , the verifier model $\\pi_\\theta(z_i, y_i'|q_i)$ takes a prompt as input and generates the CoT $z_i$ on judging the correctness of $y_i$ and at last gives a boolean conclusion $y$ if $s$ is correct. The outcome reward function $R$ is as follows:", + "bbox": [ + 109, + 364, + 888, + 470 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR (y, y ^ {\\prime}) = \\left\\{ \\begin{array}{l l} 1 & y = y ^ {\\prime}, \\\\ - 1 & y \\neq y ^ {\\prime}. \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 479, + 588, + 520 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then the objective of RL is,", + "bbox": [ + 109, + 537, + 316, + 551 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {J} (\\theta) = \\mathbf {E} _ {(q, y) \\sim \\mathcal {D}, (z, y ^ {\\prime}) \\sim \\pi_ {\\theta} (q)} \\left[ R (y, y ^ {\\prime}) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 574, + 630, + 593 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We run the vanilla PPO algorithm on a reasoning model, and propose the following strategy for improvement.", + "bbox": [ + 109, + 604, + 885, + 621 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Data collection and filtering. We collect the dataset $\\mathcal{D}$ by prompting one or multiple reasoning models to solve problems. For every problem in the dataset, we collect multiple solutions and construct a verification prompt with each solution using the template in Table 1. 
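+ { + "type": "text", + "text": "For illustration, a minimal editor-added sketch of how a sampled verification response could be parsed and scored with the outcome reward defined above is given below; the function names and parsing details are assumptions rather than the authors' released code." + },
+ { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "# Editor-added sketch (not the authors' released code): score one sampled\n# verification response with the outcome reward R(y, y') used for PPO.\ndef parse_judgment(response):\n    # the verifier is asked to end with a line 'Answer: 1' or 'Answer: 0' (Table 1)\n    last_line = response.strip().splitlines()[-1]\n    return int(last_line.split('Answer:')[-1].strip())\n\ndef outcome_reward(label, response):\n    # +1 if the verifier's conclusion matches the rule-checked label, -1 otherwise\n    return 1 if parse_judgment(response) == label else -1", + "guess_lang": "python" + },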
However, two cases may hurt the RL training, i.e., the extremely difficult problem, which we fail to sample any correct solutions and the extremely easy problems, which we fail to sample any wrong solutions. Such unbalanced data may teach the verifier to be biased on the difficulty of the problem, i.e., be optimistic on easy problems and pessimistic on difficult problems. Therefore, we do not include the data of the two cases in the training dataset.", + "bbox": [ + 109, + 628, + 887, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Solution selection by Pessimistic Verification", + "text_level": 1, + "bbox": [ + 109, + 747, + 555, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "When tackling challenging problems, one can sample multiple solutions and leverage the verifier to identify the most likely correct one. By sampling verification responses multiple times, we can achieve more reliable judgments, thereby improving overall problem-solving performance. We propose a principled and flexible method for the inference time scaling along the two dimensions, i.e., the amount of solutions sampled from the solver model and the amount of verifications sampled from a verifier model. Denote the number of solutions to a problem as $N$ and the number of verifications on each solution is $M$ . We initially conceptualize the selection process as a multi-arm bandit problem, where each arm corresponds to a distinct answer, and each verification constitutes a visit to an arm. The reward is the verification result, which can be either 1 or 0. Each time the solver generates a solution, the arm representing the solution's answer receives $M$ visits and immediately accrues $M$ rewards. The straight-forward approach is to calculate the average reward each arm receives as its score and greedily select the one with the highest score. However, for those with few visits, the score fluctuates", + "bbox": [ + 109, + 771, + 885, + 938 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 963, + 504, + 973 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and can be unreliable. Following the pessimism principle in RL, we introduce the lower-confidence-bound, which adds an uncertainty penalty to the score. Let $r_0, r_1, \\ldots, r_K$ be the average reward of each answer and $N_0, N_1, \\ldots, N_K$ be the number of solutions that drives to a certain answer. The selection algorithm is defined as:", + "bbox": [ + 109, + 123, + 887, + 180 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {a} := \\arg \\max _ {a _ {i}} \\left(r \\left(a _ {i}\\right) - \\alpha \\frac {\\ln (N M)}{N _ {i} M + 1}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 181, + 885, + 213 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the parameter $\\alpha$ is a hyper-parameter that balances the consideration of uncertainty in the decision-making process and $\\ln$ is the natural logarithm. 
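+ { + "type": "text", + "text": "As a concrete illustration of Equation (1), the following editor-added sketch selects an answer from per-answer average verification scores and solution counts; the function and variable names are assumptions, not the authors' implementation." + },
+ { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import math\n\n# Editor-added sketch of the selection rule in Equation (1).\ndef pessimistic_verification(avg_scores, counts, M, alpha=0.1):\n    # avg_scores[a]: average verification score r(a) in [0, 1] for answer a\n    # counts[a]: number of solutions N_a whose final answer is a\n    N = sum(counts.values())  # total number of sampled solutions\n    def value(a):\n        return avg_scores[a] - alpha * math.log(N * M) / (counts[a] * M + 1)\n    return max(avg_scores, key=value)", + "guess_lang": "python" + },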
Intuitively, the first term reflects the signals from the verifier, while the second term accounts for the bias of the solver in the answer space.", + "bbox": [ + 109, + 224, + 888, + 271 + ], + "page_idx": 4 + },
+ { + "type": "list", + "sub_type": "text", + "list_items": [ + "- When $N_{i}$ is small, the second term dominates, which neglects the verification and, in the extreme case, collapses to Majority Voting.", + "- When $N_{i}$ is large, the first term becomes more important, and in the extreme case, it simply selects the answer with the best verification score." + ], + "bbox": [ + 133, + 277, + 883, + 345 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "The phase shift aligns with the fact that Majority Voting is trapped in the bias of the solver, for example when a wrong answer occurs more frequently than the correct one; as $N$ and $M$ grow large, the verification scores stabilize and we tend to trust them more, because verification is often easier than solving.", + "bbox": [ + 109, + 353, + 885, + 398 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 109, + 415, + 269, + 431 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "4.1 Dataset", + "text_level": 1, + "bbox": [ + 109, + 443, + 230, + 458 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "Our experiments are on math problems. The training dataset comes from the AoPS website and official math competition homepages, similar to that of DAPO [28]. We leverage the DeepSeek-R1-Distill-Qwen-32B model as the policy model to generate 16 solutions to each problem. We leverage a rule-based program to check if the final answer in the solution is correct, which compares the reference answer of a problem with the answer in the solution and outputs the label, i.e., 1 for a correct response and 0 for an incorrect response. Then we construct the verification dataset with the prompt template in Table 1. To keep the prompt clean and short, we remove the `<think>` part in each solution and only use the summary part.", + "bbox": [ + 109, + 467, + 888, + 575 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "We test the verification ability on both AIME2024 and AIME2025, 60 questions in total. During training, we monitor the performance on AIME2024 and select the best checkpoint as the final version of Heimdall. Therefore, one can treat AIME2024 as the validation dataset and AIME2025 as the test dataset.", + "bbox": [ + 109, + 580, + 887, + 628 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "4.2 Scaling of verification", + "text_level": 1, + "bbox": [ + 109, + 640, + 356, + 657 + ], + "page_idx": 4 + },
+ { + "type": "text", + "text": "Figure 2 shows the accuracy and the length of response tokens during RL training. As depicted by the blue curve, both accuracy and response length increase with the number of training steps, albeit at different rates. At the early stage, the accuracy improves rapidly, but the response length fluctuates. This is because a minor adjustment to the policy can significantly boost accuracy. Later, the response length grows steadily, while the accuracy gradually converges to $94.5\\%$ , because the model is learning to tackle the hardest part of the training dataset, which requires increasingly more reasoning tokens. The red curve represents the RL training without the data filtering strategy, i.e., incorporating both extreme cases of difficult and easy problems in the dataset. 
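+ { + "type": "text", + "text": "The data filtering strategy referred to here is the Section 3.1 rule that drops problems whose sampled solutions are all correct or all wrong; an editor-added sketch under an assumed data layout is given below." + },
+ { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "# Editor-added sketch of the data-filtering rule from Section 3.1 (assumed layout:\n# each problem maps to the 0/1 correctness labels of its sampled solutions).\ndef filter_training_problems(problem_to_labels):\n    kept = {}\n    for problem, labels in problem_to_labels.items():\n        # keep only problems that have both correct and wrong solutions; the two\n        # extreme cases lack contrastive examples and bias the verifier\n        if 0 < sum(labels) < len(labels):\n            kept[problem] = labels\n    return kept", + "guess_lang": "python" + },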
As training progresses, the performance gap becomes more pronounced, indicating that the absence of contrastive examples detrimentally impacts performance. In addition, we test o1-mini with the same evaluation data, which is shown as the dash line in the left of Figure 2. Our model outperforms o1-mini in fewer than 20 steps, indicating substantial potential for enhancing the verification capabilities of general-purpose reasoning models.", + "bbox": [ + 109, + 664, + 887, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We further look into cases to understand what Heimdall has learned during the training. Table 3 shows the verification of a correct solution to a hard problem in AIME2025. Due to space limitations, we only highlight some key points. We can observe two types of checking:", + "bbox": [ + 109, + 853, + 885, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Forward checking. It checks if the reasoning chain in every step of the solution is correct, which is generally applicable to all problems.", + "bbox": [ + 133, + 906, + 883, + 938 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 963, + 504, + 974 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8b644b76187ca40820b4d0310eeca3a8a74118f248c5eab10b3dc3316d3bdd02.jpg", + "image_caption": [ + "Figure 2 Accuracy and response length during RL training. PPO w/o data filtering is the RL training with all problems in the dataset. Left: the accuracy on AIME2024 with the training steps. Right: the response length on the training dataset with the training steps." + ], + "image_footnote": [], + "bbox": [ + 114, + 122, + 488, + 339 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/707053fdd7c511e4dae86189152a9f568239d545b75fac1df3d40b0dcfd37403.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 122, + 883, + 339 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Backward checking. It checks whether a conclusion, be it intermediate or final, fits the known constraints. For some types of problems like solving equations and finding the general term formula of a sequence, the backward checking is efficient and easy to implement.", + "bbox": [ + 135, + 422, + 887, + 468 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The case exemplifies the common task of deriving a general formula for a sequence. As illustrated, Heimdall applies both methods of validation to confirm the correctness of the solution.", + "bbox": [ + 109, + 474, + 883, + 505 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Next, we investigate how the verification ability scales as the number of verifications increases. We sample 64 solutions for each problem with the solver model and 64 verifications for each solution, resulting in a total of $30 \\times 64 \\times 64$ responses on either AIME2024 or AIME2025 dataset. Denoting the number of verifications of each solution as $N$ , we randomly select $N$ verifications for each solution from the data collected above, and determine the final score by some aggregation operation, e.g., Majority Voting and averaging. We repeat the process for 2048 times to eliminate any fluctuations in the statistics. Taking Majority Voting as the aggregation operation, we compute the accuracy, the false positive rate and the false negative rate at every compute budget $N$ , as is shown in the top of Figure 3. 
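+ { + "type": "text", + "text": "An editor-added sketch of this subsample-and-vote estimation procedure, under an assumed data layout, is given below; it is illustrative rather than the authors' evaluation script." + },
+ { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import random\n\n# Editor-added sketch of the repeated-sampling evaluation (assumed layout: per\n# solution, the 0/1 judgments of its 64 sampled verifications).\ndef majority_vote(judgments):\n    # final judgment is 1 iff more than half of the sampled verifications say 1\n    return int(sum(judgments) * 2 > len(judgments))\n\ndef estimate_accuracy(all_judgments, labels, n, trials=2048, seed=0):\n    rng = random.Random(seed)\n    hits = 0\n    for _ in range(trials):\n        for judgments, label in zip(all_judgments, labels):\n            subset = rng.sample(judgments, n)  # randomly pick n of the stored verifications\n            hits += int(majority_vote(subset) == label)\n    return hits / (trials * len(labels))", + "guess_lang": "python" + },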
In addition, we take the average of $N$ scores, a decimal number in [0, 1], as the final score, and draw the curve of the AUC score in the bottom-left of Figure 3. It shows that Heimdall's performance can be significantly improved by simply repeat sampling more trajectories. As $N$ goes larger, the performance gradually converges to a upper limit. For example, the accuracy converges to about $97.5\\%$ on AIME2024 and $96.0\\%$ on AIME2025, and the remaining failure cases are the bias inherent in the model that could not be eliminated by adding more compute budget. We further analyze the distribution of those failure cases. For each problem, we calculate two statistics, one is the difficulty of solving it, which is estimated by the pass rate over its 64 solutions. and the other is the difficulty of verifying its solutions, which is estimated by the total number of verification failures on its solutions. Taking the two values as the x-axis and the y-axis respectively, we visualize their correlations in the bottom-right of Figure 3. We observe that the difficulty of a problem does not necessarily correlate to the difficulty of verifying its solutions. For example, Heimdall fails 17/64 times on a problem whose pass-rate is $67\\%$ .", + "bbox": [ + 109, + 513, + 883, + 800 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In addition, a clear observation is that the performance of Heimdall on AIME2025 is generally worse than that on AIME2024. We believe the main reason is that the verifying solutions on AIME2025 is harder than that on AIME2024. One evidence is that, o1-mini achieves $80.9\\%$ in AIME2024 and $75.3\\%$ in AIME2025, whose degradation is larger than that of our model.", + "bbox": [ + 109, + 806, + 883, + 868 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 Scaling of problem solving with verification", + "text_level": 1, + "bbox": [ + 109, + 881, + 545, + 898 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In terms of problem solving, repeated sampling[1] is known to scale with the inference time compute. We evaluate multiple scaling algorithms, including both with and without the verifier. We analyze the inference", + "bbox": [ + 109, + 906, + 883, + 936 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 962, + 504, + 973 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fe886d189f024c5120df42a013683bee8b158b3fc37ba3ee60cf7194f46f1c8d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 120, + 252, + 501, + 470 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6f484e4be0cf4cc54219a65ea52ca98d6d801ffe275f018bf2ff3e34d42dd81f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 252, + 883, + 470 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/695975f877c08159c982c176f2bf6d3900e61dc49ccd1d75fb0027e1967b18eb.jpg", + "image_caption": [ + "Figure 3 The inference-time scaling of verification ability on problem solutions in AIME2024 and AIME2025. Top-left: We show the accuracy of Heimdall when we sample multiple verification responses and make the judgment by majority voting. Top-right: We show the decreasing false-negative rate(FNR) and false-positive rate(FPR) as we scale up verification responses with majority voting. Bottom-left: We calculate the average score of verification responses and draw the AUC along each number of responses. 
Bottom-right: We collect the verification failure cases on every math problem and draw the relation between the difficulty of the problem and the number of verification failures, which reveals that the verification difficulty may not necessarily correlate with the difficulty of the original problem." + ], + "image_footnote": [], + "bbox": [ + 116, + 473, + 500, + 693 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/88fa108dfa2c896417548184733701d2de540dc592cd6d34e1de0766047ec256.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 473, + 883, + 693 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 962, + 504, + 973 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 130, + 268, + 148, + 276 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "**Problem**", + "bbox": [ + 129, + 290, + 225, + 304 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Let the sequence of rationals $x_{1}, x_{2}, \\ldots$ be defined such that $x_{1} = \\frac{25}{11}$ and", + "bbox": [ + 129, + 305, + 656, + 324 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nx _ {k + 1} = \\frac {1}{3} \\left(x _ {k} + \\frac {1}{x _ {k}} - 1\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 333, + 593, + 367 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$x_{2025}$ can be expressed as $\\frac{m}{n}$ for relatively prime positive integers $m$ and $n$ . Find the remainder when $m + n$ is divided by 1000.", + "bbox": [ + 129, + 377, + 866, + 407 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "**Solution**", + "bbox": [ + 129, + 421, + 223, + 436 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 130, + 443, + 148, + 450 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "**Verification**", + "bbox": [ + 129, + 465, + 246, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "", + "bbox": [ + 129, + 482, + 194, + 494 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Okay, so I need to figure out if the given solution to this problem is correct. Let me read through it again to make sure I understand.", + "bbox": [ + 129, + 496, + 864, + 526 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The problem is: ...", + "bbox": [ + 130, + 527, + 267, + 541 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The solution provided has four key steps. Let me go through each step and verify.", + "bbox": [ + 129, + 542, + 718, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 1: They let $x_{k} = a_{k} / b_{k}$ with $a_{k}$ and $b_{k}$ coprime. Then they write the recurrence relations as $a_{k + 1} = a_k^2 +b_k^2 -a_kb_k$ and $b_{k + 1} = 3a_kb_k$ .", + "bbox": [ + 129, + 556, + 864, + 587 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Wait, let's verify that. Starting from $x_{k + 1} = (1 / 3)(x_k + 1 / x_k - 1)$ . If $x_{k} = a_{k} / b_{k}$ , then $1 / x_{k} = b_{k} / a_{k}$ So $x_{k} + 1 / x_{k} = (a_{k}^{2} + b_{k}^{2}) / (a_{k}b_{k})$ . Okay, so step 1 seems correct. [Forward checking]", + "bbox": [ + 129, + 587, + 866, + 619 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 130, + 625, + 148, + 631 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Wait let me confirm with $S_{3}$ : $a_{3} = a_{2}^{2} + b_{2}^{2} - a_{2}b_{2}$ . Wait $a_{2} = 471$ , $b_{2} = 825$ . So $a_{3} = 471^{2} + 825^{2} - 471*825$ . 
[Backward checking]", + "bbox": [ + 129, + 632, + 866, + 662 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "", + "bbox": [ + 130, + 670, + 202, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The solution correctly arrives at the conclusion despite some initial confusion in their explanation. They properly computed each part modulo 1000, leading to the correct final result.", + "bbox": [ + 129, + 691, + 864, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Answer: 1", + "bbox": [ + 130, + 724, + 205, + 736 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2 A case of verification on hard problems. The verification process contains both forward checking, i.e., checking if each step is correct and backward checking, i.e., checking if a conclusion matches constraints, as is shown in the position of the red texts.", + "bbox": [ + 109, + 766, + 883, + 808 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 963, + 503, + 973 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a6ceaaf54bd7ad5fb8518d4d94c967fe0a5ccc33696d9d663f524fd60bc4d369.jpg", + "image_caption": [ + "Figure 4 The inference-time scaling of problem solving with Heimdall. The two figures show the accuracy on AIME datasets as the number of solutions scales up. Left: the problem solving accuracy on AIME2025 dataset scales with the number of solutions. The colored shaded area represents the area covered by the accuracy curves of a selection algorithm as the number of verifications increases from 1 to 64. Right: the contour map of the accuracy of Pessimistic Verification as the number of solutions (x-axis) and the number of verifications (y-axis) increase. The red curve indicates the optimal configurations within various overall compute budgets." + ], + "image_footnote": [], + "bbox": [ + 114, + 125, + 439, + 351 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/be88d7c5a6c41fb2c49cdc172f640ecb7aa221ceb85fa1108b2a5cc20ca5ec0d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 442, + 125, + 880, + 351 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "time scaling in two dimensions, i.e., the number of solutions $N$ and the number of verifications on each solution $M$ . Considering the huge computational cost, we only evaluate on AIME2025, with $N \\in [2, 256]$ and $M \\in [1, 64]$ . Similar to the analysis of verification accuracy, we first sample $N = 256$ solutions for each problem and $M = 64$ verifications for each solution as the complete data and then randomly select a subset to evaluate each scaling algorithm under a specific compute budget $(M, N)$ . We repeat the sampling for 2048 time to get a stable average score. We set $\\alpha = 0.1$ in Pessimistic Verification and also evaluate the other three selection algorithms as follows.", + "bbox": [ + 109, + 474, + 883, + 582 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Majority Voting Majority voting is one of the most commonly used inference time scaling methods. It first categorize the solutions, e.g., by the final answers for math problems. It simply selects the category that contains the largest number of solutions in it. 
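+ { + "type": "text", + "text": "An editor-added sketch of this grouping-and-counting rule is given below; the final answers are assumed to be already extracted as strings, and ties are broken arbitrarily." + },
+ { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "from collections import Counter\n\n# Editor-added sketch of plain Majority Voting over sampled solutions.\ndef majority_voting(final_answers):\n    # group the solutions by final answer and return the most frequent answer\n    return Counter(final_answers).most_common(1)[0][0]", + "guess_lang": "python" + },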
As mentioned above, majority voting can be seen as a special case of Pessimistic Verification, where $\\alpha$ is large enough to overshadow the signal of verification.", + "bbox": [ + 109, + 598, + 883, + 660 + ], + "page_idx": 8 + },
+ { + "type": "text", + "text": "Shortest Majority Voting. The recent work [31] observes a length bias: for the same question, correct solutions are often shorter than incorrect ones. Suppose the answer $a_i$ occurs $c_i$ times in the sampled responses and the average length of responses with the answer $a_i$ is $l_i$ ; then the voting score for $a_i$ is", + "bbox": [ + 109, + 676, + 883, + 723 + ], + "page_idx": 8 + },
+ { + "type": "equation", + "text": "\n$$\ns _ {i} = \\frac {c _ {i}}{l _ {i}}\n$$\n", + "text_format": "latex", + "bbox": [ + 468, + 732, + 527, + 760 + ], + "page_idx": 8 + },
+ { + "type": "text", + "text": "Sampling-based Search. The work [33] leverages a commercial LLM as the verifier, and scales the inference-time computation on the number of sampled solutions and the number of verifications. During the selection, it calculates the average verification score of each solution and selects the solution with the largest score. Note that it does not group the solutions based on their answers, which is different from the special case of Pessimistic Verification where $\\alpha$ equals zero.", + "bbox": [ + 109, + 777, + 883, + 854 + ], + "page_idx": 8 + },
+ { + "type": "text", + "text": "Tie-breaking rules. The selection algorithm may encounter a tie situation, where multiple options have the same score. In principle, one can introduce another model to compare which option is better, but for simplicity, we leverage the length prior to break the tie, namely, selecting the option with the shortest average solution length.", + "bbox": [ + 109, + 862, + 883, + 922 + ], + "page_idx": 8 + },
+ { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 963, + 503, + 973 + ], + "page_idx": 8 + },
+ { + "type": "text", + "text": "The left of Figure 4 shows how the accuracy of different scaling algorithms changes with the number of solutions $N$ . Majority Voting is the worst among all scaling algorithms under the same $N$ . By employing the length prior, Shortest Majority Voting gives a better accuracy when $N$ is small, but finally converges to $70\\%$ , the same as that of Majority Voting. The red and blue areas represent the areas covered by the group of accuracy curves with $M \\in [1,64]$ of Sampling-based Search and Pessimistic Verification respectively. As the figure shows, with the help of verification, the accuracy is significantly improved. In addition, Pessimistic Verification is better than Sampling-based Search when $M$ is small. The reason is that the verification process is inherently probabilistic. Even for an easy task, it is still possible that a wrong solution is judged as correct and is finally selected as the final solution. With the second term in Equation 1, Pessimistic Verification penalizes such uncertain cases and favors those with more visits. As $M$ grows larger, the verification scores stabilize and the second term becomes smaller, and the gap between the two algorithms gets smaller. Interestingly, when $N$ is large, the gap is large again. By checking the typical cases, we find that it comes from the grouping of solutions. Pessimistic Verification aggregates the solutions by their answers while Sampling-based Search treats each solution independently. 
Two solutions may have the same final answer, but their approaches or expressions can be entirely different. The aggregation takes this variance into consideration and therefore makes a more robust selection. Note that such grouping is not generally applicable, e.g., grouping the solutions of proof problems is not straightforward. In those cases, we expect the two algorithms to converge to the same limit.", + "bbox": [ + 109, + 123, + 887, + 380 + ], + "page_idx": 9 + },
+ { + "type": "text", + "text": "Taking the accuracy of Pessimistic Verification as a function of $M$ and $N$ , we draw the contour map in the right of Figure 4. $M = 0$ represents the vanilla Majority Voting without verifications. We can see that increasing either $N$ or $M$ improves the performance. Each point $(M, N)$ involves $N$ responses by the solver model and $M \\times N$ responses by the verifier, which is $N \\times (M + 1)$ responses in total. By minimizing the overall budget, we derive the compute-optimal configurations for different compute budgets, with the constraint that $M$ and $N$ are non-negative integers. As is shown in the figure, we need to alternately increase $N$ and $M$ , but we should increase $N$ more frequently. The reason is that the correct answer for a hard problem is sparse. To get the problem solved with a scaling algorithm, we first need to give a sufficient budget to get the correct answer.", + "bbox": [ + 109, + 388, + 887, + 508 + ], + "page_idx": 9 + },
+ { + "type": "text", + "text": "Remaining room for improvement. Can Heimdall be better? The black dashed curve in the left of Figure 4 is Best-of-N, which selects the response that equals the ground-truth answer and is the upper limit of any scaling algorithm. When $N$ is small, Pessimistic Verification performs near the upper limit, but the gap widens as $N$ increases. Consider the configuration $N = 256$ , $M = 64$ . Pessimistic Verification gets a score of $83.3\\%$ and the upper limit is $93.3\\%$ , so the gap is 3 problems. Looking into the individual problems, we find that there are 4 problems that have only one correct solution among the 256 solutions. Heimdall manages to identify the correct solution on one of them, which is the case mentioned earlier in Table 3, but fails on the other three problems. The three failed problems involve spatial reasoning, which the base model of Heimdall is not very skilled at. We believe that as the ability of the base model becomes better, the verification ability can reach the upper limit.", + "bbox": [ + 109, + 516, + 887, + 667 + ], + "page_idx": 9 + },
+ { + "type": "text", + "text": "Coordination with other solver models. In the previous experiments, we use DeepSeek-R1-Distill-Qwen-32B as the solver model, which is the model used to collect data during the training phase. To test Heimdall's generalization to other solver models, we test on DeepSeek-R1 and Gemini 2.5 Pro. For DeepSeek-R1, we directly extract the summary in its response, while for Gemini 2.5 Pro, we leverage another LLM to summarize its solution because we observe that its responses contain many reflections, which might confuse the verification process. Due to the limited compute budget, we set the total number of verifications to 16 and the total number of solving attempts to 64. We find that the verification accuracy on DeepSeek-R1 and Gemini 2.5 Pro is $90.1\\%$ and $89.9\\%$ , respectively, close to that on DeepSeek-R1-Distill-Qwen-32B. 
The scaling of problem solving, as shown in the right of Figure 1, indicates that Heimdall consistently improves the accuracy of various solver models, and using stronger solver models results in higher accuracy. Meanwhile, the improvement for Gemini 2.5 Pro is smaller than that for the other models. The reason is that the accuracy is already high on AIME2025 and the base model of Heimdall inherently lacks some ability on certain problems.", + "bbox": [ + 109, + 675, + 887, + 857 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Generalization to math proof problems", + "text_level": 1, + "bbox": [ + 109, + 872, + 526, + 890 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Heimdall is trained on math problems with explicit answers because it is easy to determine the correctness of the solution. However, there are many other types of problems, some of which might be more challenging.", + "bbox": [ + 109, + 902, + 887, + 933 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 962, + 508, + 974 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Here is a math problem and a solution of it. Think step by step and verify if each proof step in solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the solution is correct and 0 if incorrect.", + "bbox": [ + 129, + 258, + 866, + 305 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "**Problem**", + "bbox": [ + 129, + 318, + 225, + 332 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The n players of a hockey team gather to select their team captain. Initially, they stand in a circle, and each person votes for the person on their left.", + "bbox": [ + 129, + 334, + 864, + 364 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The players will update their votes via a series of rounds. In one round, each player updates their vote, one at a time, according to the following procedure: At the time of the update, if a is voting for b, and b is voting for c, then a updates their vote to c. (Note that a, b, and c need not be distinct; if $\\mathrm{b} = \\mathrm{c}$ , then a's vote does not change for this update.) Every player updates their vote exactly once in each round, in an order determined by the players (possibly different across different rounds). They repeat this updating procedure for n rounds. Prove that at this time, all n players will unanimously vote for the same person.", + "bbox": [ + 129, + 364, + 866, + 470 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "\\*\\*Solution\\*\\* ...", + "bbox": [ + 129, + 484, + 225, + 515 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "**Verification**", + "bbox": [ + 129, + 529, + 246, + 559 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Wait, but since the solution didn't present this reasoning, their proof is flawed. They presented an incorrect model of the process, leading to an overestimation of the rounds needed, but the conclusion is still correct. However, the problem is to verify if each step in their solution is correct. 
Since their step 4's reasoning is wrong, their proof is incorrect.", + "bbox": [ + 129, + 570, + 864, + 635 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "", + "bbox": [ + 129, + 641, + 202, + 662 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The solution's step 4 incorrectly assumes that each person's vote jumps $n$ times, leading to covering the entire ring. The correct approach is to note that each round reduces the number of distinct targets by one, leading to convergence in $n-1$ rounds. Thus, while the conclusion is correct, the reasoning in step 4 is flawed.", + "bbox": [ + 129, + 664, + 864, + 723 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Answer: 0", + "bbox": [ + 130, + 726, + 207, + 739 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 3 A case of verification on math proof problems. The problem is P1 in the Canadian Mathematical Olympiad 2025. We modify the prompt template to check the proof process rather than the final answer; the modified part is the red text. Heimdall checks the proof step by step and finds that the solution lacks a rigorous proof in step 4.", + "bbox": [ + 109, + 768, + 883, + 811 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 962, + 506, + 974 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In this section, we would like to test Heimdall's capability in verifying mathematical proof problems. We select 10 proof problems from the Mathematical Olympiads of different countries in 2024 and 2025, and leverage a solver model, i.e., DeepSeek-R1-Distill-Qwen-32B, to generate a proof process for each problem. Considering that the solver model is not good at spatial reasoning, we do not select geometry-related problems. Heimdall is then employed to check the correctness of each proof. Finally, we have experts evaluate both the proof processes and Heimdall's verifications. The solver model correctly solves 2 problems, while the remaining 8 are incorrect. To our surprise, Heimdall correctly judges 9/10 cases, identifying 2/2 correct proofs and detecting issues in 7/8 incorrect proofs. There is 1 problem where Heimdall fails to identify the error in the proof, resulting in a false-negative judgment. Looking into the specific cases, we find that Heimdall judges the correctness with both forward and backward checking, e.g., checking each step and testing with examples. It is capable of identifying most errors or unproven assumptions in the solution, but for some subtle problems, e.g., when an assumption does not appear in the form of a proposition but is implicitly made during the proof, Heimdall might fail. We believe that introducing proof data into the RL training would improve the performance of Heimdall, and an important future direction is how to generate such a dataset at large scale.", + "bbox": [ + 109, + 123, + 887, + 351 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6 Verification on automatic knowledge discovery", + "text_level": 1, + "bbox": [ + 109, + 367, + 607, + 385 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In the process of human exploration of the unknown, some scientists pose questions, some propose solutions to these questions, and others focus on verifying the correctness of solutions provided by their predecessors. Verification itself, as a crucial part of knowledge discovery, ensures the correctness of new knowledge. 
In this section, we design a prototype that simulates the stages of posing questions and solving them, using the math problem synthesis of NuminaMath [12], to evaluate Heimdall's effectiveness in detecting problematic knowledge. NuminaMath open-sources a comprehensive collection of 860,000 pairs of math problems and reference solutions. It includes 229,982 MATH-level pairs and 62,108 AMC-AIME-level pairs that are synthesized from seed problems in the MATH and AMC-AIME training datasets. We test Heimdall on the harder one, i.e., the AMC-AIME dataset. Flaws can exist either within the problem itself, such as an unsolvable problem, or within the solution provided. Theoretically, by checking if the solution satisfies all the requirements in the problem, Heimdall can detect both types of flaws. Meanwhile, the task also indirectly tests the generalization capabilities of Heimdall, as the quality of the problems in its training set is much higher.", + "bbox": [ + 109, + 396, + 887, + 578 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We randomly sample 8,192 pairs of questions and solutions as the test set. For each pair, we construct the prompt and query Heimdall 8 times. We calculate the sum of the verification scores, which ranges from 0 to 8, and illustrate the distribution in Figure 5. As shown, nearly half of the data is labeled incorrect with high confidence. This is consistent with the experience reported on the latest NuminaMath-1.5 website [13]: in an ablation study, the authors find that this dataset slightly hurts performance, and they plan to remove all synthetic data until they find a way to reliably generate high-quality synthetic problems. To measure the verification correctness, we randomly select 10 cases from the 0-scored group and manually check their correctness. We find that for all cases, the judgments by Heimdall are correct. Table 4 shows a case of the problematic data and its verification. Due to space limitations, we retain only the essential information, with the rest omitted. The question does not have the correct answer among its options. Meanwhile, the solution mistakenly", + "bbox": [ + 109, + 585, + 472, + 916 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/2f73438f093255b5510d6470fa706f84aab4ade82b7a97970530daabc2a053ef.jpg", + "image_caption": [ + "Figure 5 The distribution of verification scores on the problems of a synthetic dataset. The x-axis is the sum of scores across 8 verifications and the y-axis is the number of problems corresponding to each sum." + ], + "image_footnote": [], + "bbox": [ + 498, + 604, + 880, + 824 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "uses an incorrect value in the equation and interestingly arrives at one of the given options. We hypothesize", + "bbox": [ + 109, + 917, + 883, + 931 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 962, + 508, + 974 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Prompt:", + "text_level": 1, + "bbox": [ + 130, + 213, + 192, + 228 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. 
...", + "bbox": [ + 127, + 241, + 867, + 273 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "**Problem**", + "bbox": [ + 127, + 286, + 225, + 300 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A rabbit starts jumping from the point $(2,3)$ on a rectangular board that extends from $(0,0)$ to $(6,5)$ . Each jump it makes is either left, right, up, or down, and has a length of 1 unit. Each jump direction is uniformly and randomly chosen. The jumping sequence continues until the rabbit reaches any border of the rectangle. Calculate the probability that the rabbit's sequence of jumps ends on the top or bottom side of the rectangle.", + "bbox": [ + 127, + 301, + 867, + 376 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(A). $\\frac{1}{4}$ (B). $\\frac{17}{32}$ (C). $\\frac{17}{24}$ (D). $\\frac{1}{3}$ (E). $\\frac{3}{4}$", + "bbox": [ + 129, + 377, + 364, + 393 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "**Solution**", + "bbox": [ + 129, + 406, + 223, + 420 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Let $P_{(x,y)}$ denote the probability of the rabbit ending on the top or bottom side starting from $(x,y)$ . Using symmetry about the center line $y = 2.5$ and $x = 3$ , and recursive relations, we analyze the movements.", + "bbox": [ + 127, + 421, + 867, + 465 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 130, + 474, + 148, + 481 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Solving Equations: We solve this using recursive approaches and considering border cases explicitly. Assuming symmetry simplifies $P_{(3,2)} = P_{(3,3)}$ .", + "bbox": [ + 127, + 482, + 864, + 513 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Here, let's consider only two key points $(2,3)$ and $(3,3)$ for simplicity. Solving:", + "bbox": [ + 127, + 513, + 696, + 527 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nP _ {(2, 3)} = \\frac {1}{4} P _ {(2, 2)} + \\frac {1}{4} (1) + \\frac {1}{4} P _ {(1, 3)} + \\frac {1}{4} P _ {(3, 3)}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 537, + 653, + 566 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "··", + "bbox": [ + 129, + 582, + 148, + 588 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Thus, the rabbit's sequence of jumps ends on the top or bottom side with a probability of $\\frac{17}{24}$ . The final answer is (C) $\\boxed{\\frac{17}{24}}$", + "bbox": [ + 127, + 588, + 867, + 642 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Response:", + "text_level": 1, + "bbox": [ + 130, + 652, + 207, + 666 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "", + "bbox": [ + 129, + 680, + 196, + 695 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "···", + "bbox": [ + 130, + 704, + 148, + 710 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "", + "bbox": [ + 129, + 712, + 202, + 724 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The solution provided incorrectly assumes that $\\mathrm{P}(2,4) = 1$ , which is not true since (2,4) is not a top or bottom border. 
They oversimplified the equations by not accounting for the correct dependencies of other points like $\\mathrm{P}(2,4)$ and mishandled the symmetry in y-direction due to different boundary conditions $(\\mathrm{P} = 0$ at $\\mathrm{y} = 0$ and $\\mathrm{P} = 1$ at $\\mathrm{y} = 5$ ).", + "bbox": [ + 127, + 726, + 867, + 786 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Answer: 0", + "bbox": [ + 130, + 787, + 207, + 800 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 4 An example of problematic synthetic data and the verification. The verification correctly points out the problem in the solution.", + "bbox": [ + 109, + 829, + 885, + 859 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 962, + 509, + 974 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "that the task is somewhat challenging for the LLM used for synthetic data generation, leading the LLM to hallucinate a superficially consistent but wrong output.", + "bbox": [ + 109, + 125, + 885, + 156 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7 Future Work", + "text_level": 1, + "bbox": [ + 109, + 170, + 269, + 186 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Firstly, the verification dataset is formed by simply extracting the summary part of a reasoning model, which, as we observed, is sometimes overly brief, e.g., arriving at an answer in only one sentence. A more detailed explanation would make the verification easier. One can further improve the verification accuracy by summarizing the reasoning process with another LLM. Secondly, we mainly evaluate the verification ability on math problems that have final answers. There are many other types of tasks, e.g., coding problems and mathematical proof problems. Although the learned ability is generalizable to other domains, we expect it to be beneficial to train with data from other domains. For example, in the context of coding problems, backward checking may take the alternative form of designing test cases. Lastly, we only prototype the usage of Heimdall in automatic knowledge discovery. In real scenarios, posing valuable questions is a challenging task that demands both curiosity and keen insight. Such ability is often a critical part of scientific discovery, yet it is seldom investigated. We believe that as the general capabilities of LLMs continue to advance, this direction will become more and more important.", + "bbox": [ + 109, + 199, + 888, + 382 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "8 Conclusion", + "text_level": 1, + "bbox": [ + 109, + 397, + 253, + 414 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we propose to train a long CoT verifier called Heimdall with reinforcement learning. On competitive math problems, Heimdall achieves high accuracy and scales well along both the length of reasoning chains and the number of repeated generations. Through human evaluation, we find that Heimdall also shows impressive generalization ability on out-of-domain problems, such as math proofs. We further propose the inference-time scaling algorithm called Pessimistic Verification, which incorporates a solver and Heimdall for problem solving. By scaling up the compute, we can achieve performance comparable to top-tier models on challenging math problems. 
Lastly, we design a prototype of automatic knowledge discovery and demonstrate that Heimdall can reliably detect flaws in the synthetic data from another LLM.", + "bbox": [ + 109, + 426, + 888, + 550 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "9 Acknowledgments", + "text_level": 1, + "bbox": [ + 109, + 564, + 328, + 583 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We thank the data annotation team for their expertise on collecting the evaluation data and analyzing the verification outputs, including Bocheng Zhou, Weijian Zhao, Tong Sun and Zhiyuan Zhang.", + "bbox": [ + 109, + 593, + 885, + 625 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 962, + 509, + 974 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 121, + 223, + 136 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024.", + "[2] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025.", + "[3] Google. Gemini 2.5: Our most intelligent ai model, 2025. URL https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking. Accessed: 2025-03-25.", + "[4] Grok. Grok 3 beta — the age of reasoning agents, 2025. URL https://x.ai/news/grok-3. Accessed: 2025-02-19.", + "[5] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, et al. A survey on lmm-as-a-judge. arXiv preprint arXiv:2411.15594, 2024.", + "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[7] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "[8] Minki Kang, Jongwon Jeong, and Jaewoong Cho. T1: Tool-integrated self-verification for test-time compute scaling in small language models, 2025. URL https://arxiv.org/abs/2504.04718.", + "[9] Tian Lan, Wenwei Zhang, Chengqi Lyu, Shuaibin Li, Chen Xu, Heyan Huang, Dahua Lin, Xian-Ling Mao, and Kai Chen. Training language models to critique with multi-agent feedback. arXiv preprint arXiv:2410.15287, 2024.", + "[10] Tian Lan, Wenwei Zhang, Chen Xu, Heyan Huang, Dahua Lin, Kai Chen, and Xian-Ling Mao. Criticeval: Evaluating large-scale language model as critic. Advances in Neural Information Processing Systems, 37:66907-66960, 2024.", + "[11] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024.", + "[12] Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. 
Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions. Hugging Face repository, 13:9, 2024.", + "[13] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-1.5](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024.", + "[14] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. Criticbench: Benchmarking llms for critique-correct reasoning. arXiv preprint arXiv:2402.14809, 2024.", + "[15] Liangchen Luo, Zi Lin, Yinxiao Liu, Lei Shu, Yun Zhu, Jingbo Shang, and Lei Meng. Critique ability of large language models. arXiv preprint arXiv:2310.04815, 2023.", + "[16] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. S²r: Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025.", + "[17] Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024.", + "[18] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." + ], + "bbox": [ + 112, + 150, + 888, + 917 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 962, + 508, + 974 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[19] Qian Pan, Zahra Ashktorab, Michael Desmond, Martin Santillan Cooper, James Johnson, Rahul Nair, Elizabeth Daly, and Werner Geyer. Human-centered design recommendations for lmm-as-a-judge. arXiv preprint arXiv:2407.03479, 2024.", + "[20] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "[21] Guijin Son, Hyunwoo Ko, Hoyoung Lee, Yewon Kim, and Seunghyeok Hong. Llm-as-a-judge & reward model: What they can and cannot do. arXiv preprint arXiv:2409.11239, 2024.", + "[22] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025.", + "[23] Rich Sutton. Verification, the key to ai. URL http://incompleteideas.net/IncIdeas/KeytoAI.html.", + "[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.", + "[25] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025.", + "[26] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025.", + "[27] Jiayi Ye, Yanbo Wang, Yue Huang, Dongping Chen, Qihui Zhang, Nuno Moniz, Tian Gao, Werner Geyer, Chao Huang, Pin-Yu Chen, et al. Justice or prejudice? quantifying biases in llm-as-a-judge. 
arXiv preprint arXiv:2410.02736, 2024.", + "[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025.", + "[29] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024.", + "[30] Wojciech Zaremba, Evgenia Nitishinskaya, Boaz Barak, Stephanie Lin, Sam Toyer, Yaodong Yu, Rachel Dias, Eric Wallace, Kai Xiao, Johannes Heidecke, et al. Trading inference-time compute for adversarial robustness. arXiv preprint arXiv:2501.18841, 2025.", + "[31] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025.", + "[32] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024.", + "[33] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025.", + "[34] Jian Zhao, Runze Liu, Kaiyan Zhang, Zhimu Zhou, Junqi Gao, Dong Li, Jiafei Lyu, Zhouyi Qian, Biqing Qi, Xiu Li, et al. Genprm: Scaling test-time compute of process reward models via generative reasoning. arXiv preprint arXiv:2504.00891, 2025.", + "[35] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025." + ], + "bbox": [ + 109, + 123, + 888, + 814 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 963, + 508, + 974 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_model.json b/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f93161b8f5d96adaa14f3f73edde0cd7f5ebfbcd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_model.json @@ -0,0 +1,2300 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.11, + 0.064, + 0.366, + 0.088 + ], + "angle": 0, + "content": "ByteDance | Seed" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.128, + 0.887, + 0.156 + ], + "angle": 0, + "content": "Heimdall: test-time scaling on the generative verification" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.194, + 0.597, + 0.213 + ], + "angle": 0, + "content": "Wenlei Shi, Xing Jin" + }, + { + "type": "text", + "bbox": [ + 0.43, + 0.224, + 0.569, + 0.24 + ], + "angle": 0, + "content": "ByteDance Seed" + }, + { + "type": "title", + "bbox": [ + 0.453, + 0.294, + 0.546, + 0.311 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.322, + 0.848, + 0.624 + ], + "angle": 0, + "content": "An AI system can create and maintain knowledge only to the extent that it can verify that knowledge itself [23]. 
Recent work on long Chain-of-Thought reasoning has demonstrated great potential of LLMs on solving competitive problems, but their verification ability remains to be weak and not sufficiently investigated. In this paper, we propose Heimdall, the long CoT verification LLM that can accurately judge the correctness of solutions. With pure reinforcement learning, we boost the verification accuracy from \\(62.5\\%\\) to \\(94.5\\%\\) on competitive math problems. By scaling with repeated sampling, the accuracy further increases to \\(97.5\\%\\). Through human evaluation, Heimdall demonstrates impressive generalization capabilities, successfully detecting most issues in challenging math proofs, the type of which is not included during training. Furthermore, we propose Pessimistic Verification to extend the functionality of Heimdall to scaling up the problem solving. It calls Heimdall to judge the solutions from a solver model and based on the pessimistic principle, selects the most likely correct solution with the least uncertainty. Taking DeepSeek-R1-Distill-Qwen-32B as the solver model, Pessimistic Verification improves the solution accuracy on AIME2025 from \\(54.2\\%\\) to \\(70.0\\%\\) with \\(16\\times\\) compute budget and to \\(83.3\\%\\) with more compute budget. With the stronger solver Gemini 2.5 Pro, the score reaches \\(93.0\\%\\). Finally, we prototype an automatic knowledge discovery system, a ternary system where one poses questions, another provides solutions, and the third verifies the solutions. Using the data synthesis work NuminaMath [13] for the first two components, Heimdall effectively identifies problematic records within the dataset and reveals that nearly half of the data is flawed, which interestingly aligns with the recent ablation studies from NuminaMath." + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.635, + 0.292, + 0.649 + ], + "angle": 0, + "content": "Date: April 17, 2025" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.65, + 0.819, + 0.665 + ], + "angle": 0, + "content": "Correspondence: Wenlei Shi at wenlei.shi@bytedance.com, Xing Jin at jinxing.9@bytedance.com" + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.724, + 0.266, + 0.74 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.754, + 0.888, + 0.906 + ], + "angle": 0, + "content": "In the realm of scientific and mathematical discovery, the process of logistic verification and validation is as crucial as the initial act of problem-solving. One of the most illustrative examples of this principle can be found in the famous thought experiment 'chasing a beam of light' by Albert Einstein, where he found the paradox within the established physics theories and further formulated the principle of the constancy of the speed of light, a cornerstone of his Special Theory of Relativity. Recently, the problem solving ability of LLMs have been significantly improved. With the long Chain of Thought(CoT) reasoning, advanced LLMs are now able to effectively solve complex competition-level problems in both math and code domains. However, the verification ability of LLMs has not been sufficiently investigated. 
On one hand, although the intelligence of general purported LLM increases rapidly with the long CoT capabilities, we find that current SOTA models with direct prompting [5, 21] are not good at verifications on complex problems, e.g., o1-mini only achieves" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.10337v2 [cs.AI] 16 Apr 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.125, + 0.368, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.125, + 0.627, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.125, + 0.882, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.323, + 0.887, + 0.421 + ], + "angle": 0, + "content": "Figure 1 Scaling of Heimdall. Left: the verification accuracy scales with the response length during RL training. With more reasoning tokens, Heimdall gives more accurate judgment on the solutions on AIME2024. Middle: the verification accuracy scales with repeated sampling and Majority Voting. By sampling multiple verification trajectories and voting, the accuracy can be further improved. Right: with Heimdall scoring the solutions on AIME2025, the problem solving accuracy scales with the number of solutions. We verify 16 times on each solution and select the most likely correct one with Pessimistic Verification \\((\\times 16)\\). When inter-playing with various solver models, Heimdall gives significant improvements over pure solver-based Majority Voting(MV)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.447, + 0.884, + 0.493 + ], + "angle": 0, + "content": "80.9% on our evaluation dataset. On the other hand, some work [16, 17, 22, 25, 29, 32, 35] trains a dedicated model for verification or critique but the high-quality verification data is hard to collect, which limits the verification capability and hence impedes the application to challenging problems." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.501, + 0.885, + 0.743 + ], + "angle": 0, + "content": "In this paper, we claim that verifying if a solution is correct is a special type of problem solving, i.e., a true/false question and involves step-by-step judgment on the solution. Inspired by the recent progress on the long CoT reasoning, we propose to train a long CoT verifier through reinforcement learning. We name it Heimdall, symbolizing its sharp ability to detect errors and safeguards the correctness of knowledge. We leverage PPO [20] algorithm and find that the data processing is critical to the RL training. Specifically, two types of problems hinder the optimization, i.e., easy problems with only correct solutions and hard problems with only wrong solutions, both of which lack contrastive examples and tends to guide the verifier to simply identify the hardness of a problem, rather than finding the wrong position in the solution. By filtering out the two cases, the model learns the verification ability more effectively. Taking the competitive math problems as our primary experimental domain, we show that the verification ability follows the test time scaling law where the accuracy improves significantly from \\(62.5\\%\\) to \\(94.5\\%\\) as the response length grows, as is shown the left of Figure 1. 
The performance can be further improved by sampling multiple verifications and voting on the judgment results. In the middle of Figure 1, the accuracy grows from \\(94.5\\%\\) to \\(97.5\\%\\) on AIME2024 as the number of verifications grows from 2 to 64. Furthermore, the evaluation from human experts shows that Heimdall generalizes well on math proof problems although it is trained with only the calculation problems with explicit answers." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.75, + 0.885, + 0.931 + ], + "angle": 0, + "content": "In addition, we extend the usage of Heimdall to scale up the problem solving. Suppose the solver model gives multiple solutions for a problem and Heimdall judges the correctness of each solution for multiple times. We can select the best solution based on the verification results. We frame the selection process as a multi-arm bandit problem where solutions with the same conclusion are treated as multiple visits to the same 'arm'. Based on the pessimism principle, we propose the solution selection algorithm called Pessimistic Verification that minimizes the uncertainty of selecting wrong solutions. The algorithm unifies Majority Voting and reward model based Best-of-N by balancing the contributions of the solver and the verifier, and empirically demonstrates better scaling over both algorithms. Taking DeepSeek-R1-Distill-Qwen-32B [6] as the solver model, which scores 54 on AIME2025, Pessimistic Verification raises the scores to 70 with \\(\\times 16\\) compute, matching the performance of o1, and to 83.3 with more compute. We further test with stronger solver models, including DeepSeek-R1 [6] and Gemini 2.5 Pro[3]. As is shown in the right of Figure 1, Pessimistic Verification with Heimdall consistently improves the problem solving of various models and with Gemini 2.5 Pro, the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.964, + 0.506, + 0.975 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.124, + 0.888, + 0.157 + ], + "angle": 0, + "content": "accuracy on AIME2025 reaches \\(93\\%\\), matching the currently reported SOTA with multiple attempts by Grok3 [4]." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.162, + 0.888, + 0.254 + ], + "angle": 0, + "content": "Finally, we create a prototype to demonstrate the utility of Heimdall on the automatic knowledge discovery. We use the work of math data synthesis called NuminaMath [12] as the procedure of automatically proposing new problems and the corresponding solutions, and call Heimdall to detect errors in the synthetic dataset. Human annotation demonstrates that Heimdall accurately identifies the errors in the dataset. The verification result also reveals that the quality of current synthetic dataset is poor, which is consistent with the authors' finding that removing the dataset from training improves the performance of the solver model [13]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.261, + 0.441, + 0.275 + ], + "angle": 0, + "content": "In summary, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.283, + 0.884, + 0.328 + ], + "angle": 0, + "content": "- We propose Heimdall, the long CoT verifier by reinforcement learning and demonstrate the superior accuracy than top-tier LLMs. Heimdall also shows good generalization ability on out-of-domain problems, such as math proof problems." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.336, + 0.884, + 0.381 + ], + "angle": 0, + "content": "- We propose a unified algorithm called Pessimistic Verification for inference time scaling on problem solving. Empirically, it scales better than the vanilla Majority Voting or the reward-model based Best-of-N and achieve SOTA accuracy on AIME2025." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.389, + 0.884, + 0.433 + ], + "angle": 0, + "content": "- We create a prototype to show the utility of Heimdall in the autonomous knowledge discovery, where Heimdall is used to identify the correctness of the problem-solution pairs synthesized by another LLM. Human evaluations show that Heimdall can effectively detect the flaws in the synthetic data." + }, + { + "type": "list", + "bbox": [ + 0.135, + 0.283, + 0.884, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.45, + 0.281, + 0.466 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.48, + 0.888, + 0.587 + ], + "angle": 0, + "content": "Reasoning model. Reasoning models outperform previous general-purpose models on challenging reasoning tasks. During the chain of thought(CoT) reasoning, they keep reflecting their claims and searching viable solutions, utilizing more compute budget and providing better and more robust results. OpenAI first released its reasoning models[2, 7, 30] that performs significantly better on competitive tests like AIME and CodeForces than its previous models. Work by DeepSeek[6] and Kimi[24] independently propose different ways of reinforcement learning to trigger the reflection and searching capability in their base models. Recently, Grok3 [4] and Gemini 2.5 Pro [3] also demonstrate their impressive reasoning capabilities through long CoT." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.593, + 0.888, + 0.791 + ], + "angle": 0, + "content": "Generative evaluation. Recently some work are interested in improving the verification ability of the LLMs. Some [8, 29, 32, 34] explores finetuning an LLM with synthetic verification data to improve its verification ability. However, it is hard to synthesize high-quality data if the LLM inherently lacks the verification skills. One of the related topic is LLM-as-a-Judge [5, 11] where a LLM is prompted to evaluate responses from other LLMs. The work[11] leverages strong LLMs as judges to evaluate other models in various domains and reveals that strong LLM judges have good generalization ability in different domains. Some works design the judge system and analyze of the judgment behavior[19, 21, 27]. However, prompting is only effective on easy tasks, and when it comes to competitive tests, the general purported reasoning models performs not quite well, as is reveals from our test in Section 4. Another similar topic is critique [9, 10, 14, 15, 18], which often focuses on code and math problems and is used for giving suggestions for further revision. Critique fine-tuning [25] shows that fine-tuning on a high-quality critique data is beneficial to the reasoning ability of a base model. Several work, e.g., CTRL [26] leverages RL to train LLMs. However, they do not leverage the long CoT ability, which limits the verification performance on complex reasoning problems." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.805, + 0.245, + 0.823 + ], + "angle": 0, + "content": "3 Approach" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.835, + 0.886, + 0.882 + ], + "angle": 0, + "content": "We define the verification task where we ask a model to judge if a solution to a problem is correct in its CoT and finally put its judgment result at the end of the response. Table 1 is the template of the verification prompt and the expected format of a response." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.963, + 0.506, + 0.975 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.129, + 0.136, + 0.867, + 0.183 + ], + "angle": 0, + "content": "Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the final answer in the solution is correct and 0 if incorrect." + }, + { + "type": "code", + "bbox": [ + 0.129, + 0.196, + 0.227, + 0.227 + ], + "angle": 0, + "content": "**Problem**\n${problem}" + }, + { + "type": "code", + "bbox": [ + 0.13, + 0.241, + 0.225, + 0.273 + ], + "angle": 0, + "content": "**Solution**\n${solution}" + }, + { + "type": "table_caption", + "bbox": [ + 0.357, + 0.299, + 0.638, + 0.313 + ], + "angle": 0, + "content": "Table 1 Prompt template for verification." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.339, + 0.502, + 0.357 + ], + "angle": 0, + "content": "3.1 Reinforcement learning for verification" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.365, + 0.889, + 0.472 + ], + "angle": 0, + "content": "RL Setup. Let \\(\\mathcal{D} = \\{(p_i, s_i, y_i)\\}_{i=1}^N\\) be our dataset, where \\(p_i\\) is a problem, \\(s_i\\) is a solution to the problem, which may be the response from a reasoning model and \\(y_i \\in \\{0, 1\\}\\) represents the correctness of the solution, with 1 indicating correctness and 0 indicating incorrectness. Given a triplet \\((p_i, s_i, y_i)\\), we prompt a LLM to check the correctness of the solution step-by-step and finally give a conclusion on the correctness, as is shown in Table 1. Denoting the prompt as \\(q_i\\), the verifier model \\(\\pi_\\theta(z_i, y_i'|q_i)\\) takes a prompt as input and generates the CoT \\(z_i\\) on judging the correctness of \\(y_i\\) and at last gives a boolean conclusion \\(y\\) if \\(s\\) is correct. The outcome reward function \\(R\\) is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.48, + 0.589, + 0.521 + ], + "angle": 0, + "content": "\\[\nR (y, y ^ {\\prime}) = \\left\\{ \\begin{array}{l l} 1 & y = y ^ {\\prime}, \\\\ - 1 & y \\neq y ^ {\\prime}. \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.538, + 0.318, + 0.553 + ], + "angle": 0, + "content": "Then the objective of RL is," + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.575, + 0.632, + 0.594 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} (\\theta) = \\mathbf {E} _ {(q, y) \\sim \\mathcal {D}, (z, y ^ {\\prime}) \\sim \\pi_ {\\theta} (q)} \\left[ R (y, y ^ {\\prime}) \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.606, + 0.887, + 0.622 + ], + "angle": 0, + "content": "We run the vanilla PPO algorithm on a reasoning model, and propose the following strategy for improvement." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.629, + 0.888, + 0.734 + ], + "angle": 0, + "content": "Data collection and filtering. 
We collect the dataset \\(\\mathcal{D}\\) by prompting one or multiple reasoning models to solve problems. For every problem in the dataset, we collect multiple solutions and construct a verification prompt with each solution using the template in Table 1. However, two cases may hurt the RL training, i.e., the extremely difficult problem, which we fail to sample any correct solutions and the extremely easy problems, which we fail to sample any wrong solutions. Such unbalanced data may teach the verifier to be biased on the difficulty of the problem, i.e., be optimistic on easy problems and pessimistic on difficult problems. Therefore, we do not include the data of the two cases in the training dataset." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.748, + 0.557, + 0.765 + ], + "angle": 0, + "content": "3.2 Solution selection by Pessimistic Verification" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.772, + 0.886, + 0.939 + ], + "angle": 0, + "content": "When tackling challenging problems, one can sample multiple solutions and leverage the verifier to identify the most likely correct one. By sampling verification responses multiple times, we can achieve more reliable judgments, thereby improving overall problem-solving performance. We propose a principled and flexible method for the inference time scaling along the two dimensions, i.e., the amount of solutions sampled from the solver model and the amount of verifications sampled from a verifier model. Denote the number of solutions to a problem as \\( N \\) and the number of verifications on each solution is \\( M \\). We initially conceptualize the selection process as a multi-arm bandit problem, where each arm corresponds to a distinct answer, and each verification constitutes a visit to an arm. The reward is the verification result, which can be either 1 or 0. Each time the solver generates a solution, the arm representing the solution's answer receives \\( M \\) visits and immediately accrues \\( M \\) rewards. The straight-forward approach is to calculate the average reward each arm receives as its score and greedily select the one with the highest score. However, for those with few visits, the score fluctuates" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.964, + 0.505, + 0.974 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.124, + 0.888, + 0.181 + ], + "angle": 0, + "content": "and can be unreliable. Following the pessimism principle in RL, we introduce the lower-confidence-bound, which adds an uncertainty penalty to the score. Let \\( r_0, r_1, \\ldots, r_K \\) be the average reward of each answer and \\( N_0, N_1, \\ldots, N_K \\) be the number of solutions that drives to a certain answer. The selection algorithm is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.182, + 0.887, + 0.214 + ], + "angle": 0, + "content": "\\[\n\\hat {a} := \\arg \\max _ {a _ {i}} \\left(r \\left(a _ {i}\\right) - \\alpha \\frac {\\ln (N M)}{N _ {i} M + 1}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.226, + 0.889, + 0.272 + ], + "angle": 0, + "content": "where the parameter \\(\\alpha\\) is a hyper-parameter that balances the consideration of uncertainty in the decision-making process and \\(\\ln\\) is the natural logarithm. Intuitively, the first term reflects the signals from the verifier, while the second term accounts for the bias of solver in the answer space." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.279, + 0.884, + 0.308 + ], + "angle": 0, + "content": "- When \\( N_{i} \\) is small, the second term dominates, which neglects the verification and in the extreme case, it collapses to Majority Voting." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.317, + 0.883, + 0.347 + ], + "angle": 0, + "content": "- When \\( N_{i} \\) is large, the first term becomes more important, and in the extreme case, it simply selects the answer with the best verification score." + }, + { + "type": "list", + "bbox": [ + 0.135, + 0.279, + 0.884, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.354, + 0.886, + 0.4 + ], + "angle": 0, + "content": "The phase shift aligns with the fact that Majority Voting is trapped in the bias of the solver, for example when a wrong answer occurs more frequently than the correct one, and as \\( N \\) and \\( M \\) is large, the verification scores stabilize and we tend to trust more on it, because the verification is often easier than the solution." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.416, + 0.271, + 0.433 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.444, + 0.231, + 0.459 + ], + "angle": 0, + "content": "4.1 Dataset" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.468, + 0.889, + 0.577 + ], + "angle": 0, + "content": "Our experiment is on the math problems. The training dataset comes from the AoPS website and official math competition homepages, similar to that of DAPO [28]. We leverage DeepSeek-R1-Distill-Qwen-32B model as the policy model to generate 16 solutions to each problem. We leverage a rule-based program to check if the final answer in the solution is correct, which compares the reference answer of a problem and the answer in the solution and outputs the label, i.e., 1 for a correct response and 0 for the incorrect response. Then we construct the verification dataset with the prompt template in Table 1. To keep the prompt clean and short, we remove the `` part in each solution and only use the summary part." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.581, + 0.888, + 0.629 + ], + "angle": 0, + "content": "We test the verification ability on both AIME2024 and AIME2025, 60 questions in total. During training, we monitor the performance on AIME2024 and select the best checkpoint as the final version of Heimdall. Therefore, one can treat AIME2024 as the validation dataset and AIME2025 as the test dataset." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.641, + 0.357, + 0.658 + ], + "angle": 0, + "content": "4.2 Scaling of verification" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.665, + 0.888, + 0.848 + ], + "angle": 0, + "content": "Figure 2 shows the accuracy and the length of response tokens during RL training. As depicted by the blue curve, both accuracy and response length increase with the number of training steps, albeit at different rates. At the early stage, the accuracy improves rapidly, but the response length fluctuates. This is because a minor adjustment to the policy can significantly boost accuracy. Later, the response length grows constantly, while the accuracy gradually converges to \\(94.5\\%\\), because the model is learning to tackle the hardest part in the training dataset, which requires the increasingly more reasoning tokens. 
The red curve represents the RL training without the data filtering strategy, i.e., incorporating both extreme cases of difficult and easy problems in the dataset. As training progresses, the performance gap becomes more pronounced, indicating that the absence of contrastive examples detrimentally impacts performance. In addition, we test o1-mini with the same evaluation data, which is shown as the dash line in the left of Figure 2. Our model outperforms o1-mini in fewer than 20 steps, indicating substantial potential for enhancing the verification capabilities of general-purpose reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.854, + 0.886, + 0.901 + ], + "angle": 0, + "content": "We further look into cases to understand what Heimdall has learned during the training. Table 3 shows the verification of a correct solution to a hard problem in AIME2025. Due to space limitations, we only highlight some key points. We can observe two types of checking:" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.907, + 0.884, + 0.939 + ], + "angle": 0, + "content": "- Forward checking. It checks if the reasoning chain in every step of the solution is correct, which is generally applicable to all problems." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.964, + 0.505, + 0.975 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.123, + 0.49, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.123, + 0.885, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.354, + 0.885, + 0.397 + ], + "angle": 0, + "content": "Figure 2 Accuracy and response length during RL training. PPO w/o data filtering is the RL training with all problems in the dataset. Left: the accuracy on AIME2024 with the training steps. Right: the response length on the training dataset with the training steps." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.424, + 0.888, + 0.469 + ], + "angle": 0, + "content": "- Backward checking. It checks whether a conclusion, be it intermediate or final, fits the known constraints. For some types of problems like solving equations and finding the general term formula of a sequence, the backward checking is efficient and easy to implement." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.476, + 0.884, + 0.506 + ], + "angle": 0, + "content": "The case exemplifies the common task of deriving a general formula for a sequence. As illustrated, Heimdall applies both methods of validation to confirm the correctness of the solution." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.514, + 0.885, + 0.801 + ], + "angle": 0, + "content": "Next, we investigate how the verification ability scales as the number of verifications increases. We sample 64 solutions for each problem with the solver model and 64 verifications for each solution, resulting in a total of \\(30 \\times 64 \\times 64\\) responses on either AIME2024 or AIME2025 dataset. Denoting the number of verifications of each solution as \\(N\\), we randomly select \\(N\\) verifications for each solution from the data collected above, and determine the final score by some aggregation operation, e.g., Majority Voting and averaging. We repeat the process for 2048 times to eliminate any fluctuations in the statistics. 
Taking Majority Voting as the aggregation operation, we compute the accuracy, the false positive rate, and the false negative rate at every compute budget \\(N\\), as shown in the top of Figure 3. In addition, we take the average of \\(N\\) scores, a decimal number in [0, 1], as the final score, and draw the curve of the AUC score in the bottom-left of Figure 3. It shows that Heimdall's performance can be significantly improved simply by repeatedly sampling more trajectories. As \\(N\\) grows larger, the performance gradually converges to an upper limit. For example, the accuracy converges to about \\(97.5\\%\\) on AIME2024 and \\(96.0\\%\\) on AIME2025, and the remaining failure cases reflect biases inherent in the model that cannot be eliminated by adding more compute budget. We further analyze the distribution of those failure cases. For each problem, we calculate two statistics: one is the difficulty of solving it, estimated by the pass rate over its 64 solutions, and the other is the difficulty of verifying its solutions, estimated by the total number of verification failures on its solutions. Taking the two values as the x-axis and the y-axis respectively, we visualize their correlation in the bottom-right of Figure 3. We observe that the difficulty of a problem does not necessarily correlate with the difficulty of verifying its solutions. For example, Heimdall fails 17/64 times on a problem whose pass rate is \\(67\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.808, + 0.885, + 0.869 + ], + "angle": 0, + "content": "In addition, a clear observation is that the performance of Heimdall on AIME2025 is generally worse than that on AIME2024. We believe the main reason is that verifying solutions on AIME2025 is harder than on AIME2024. One piece of evidence is that o1-mini achieves \\(80.9\\%\\) in AIME2024 and \\(75.3\\%\\) in AIME2025, a degradation larger than that of our model." + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.882, + 0.547, + 0.899 + ], + "angle": 0, + "content": "4.3 Scaling of problem solving with verification" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.907, + 0.884, + 0.937 + ], + "angle": 0, + "content": "In terms of problem solving, repeated sampling [1] is known to scale with the inference-time compute. We evaluate multiple scaling algorithms, including both with and without the verifier. We analyze the inference" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.963, + 0.505, + 0.974 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.121, + 0.253, + 0.502, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.253, + 0.885, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.474, + 0.501, + 0.694 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.474, + 0.884, + 0.694 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.708, + 0.887, + 0.807 + ], + "angle": 0, + "content": "Figure 3 The inference-time scaling of verification ability on problem solutions in AIME2024 and AIME2025. Top-left: We show the accuracy of Heimdall when we sample multiple verification responses and make the judgment by majority voting. Top-right: We show the decreasing false-negative rate (FNR) and false-positive rate (FPR) as we scale up verification responses with majority voting. 
Bottom-left: We calculate the average score of verification responses and draw the AUC along each number of responses. Bottom-right: We collect the verification failure cases on every math problem and draw the relation between the difficulty of the problem and the number of verification failures, which reveals that the verification difficulty may not necessarily correlate with the difficulty of the original problem." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.963, + 0.505, + 0.974 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.132, + 0.269, + 0.15, + 0.277 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.291, + 0.226, + 0.305 + ], + "angle": 0, + "content": "**Problem**" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.306, + 0.657, + 0.325 + ], + "angle": 0, + "content": "Let the sequence of rationals \\( x_{1}, x_{2}, \\ldots \\) be defined such that \\( x_{1} = \\frac{25}{11} \\) and" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.334, + 0.594, + 0.368 + ], + "angle": 0, + "content": "\\[\nx _ {k + 1} = \\frac {1}{3} \\left(x _ {k} + \\frac {1}{x _ {k}} - 1\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.378, + 0.867, + 0.409 + ], + "angle": 0, + "content": "\\(x_{2025}\\) can be expressed as \\(\\frac{m}{n}\\) for relatively prime positive integers \\(m\\) and \\(n\\). Find the remainder when \\(m + n\\) is divided by 1000." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.422, + 0.225, + 0.437 + ], + "angle": 0, + "content": "**Solution**" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.444, + 0.15, + 0.452 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.466, + 0.247, + 0.481 + ], + "angle": 0, + "content": "**Verification**" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.483, + 0.195, + 0.495 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.497, + 0.865, + 0.527 + ], + "angle": 0, + "content": "Okay, so I need to figure out if the given solution to this problem is correct. Let me read through it again to make sure I understand." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.529, + 0.269, + 0.542 + ], + "angle": 0, + "content": "The problem is: ..." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.543, + 0.72, + 0.558 + ], + "angle": 0, + "content": "The solution provided has four key steps. Let me go through each step and verify." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.558, + 0.865, + 0.588 + ], + "angle": 0, + "content": "Step 1: They let \\( x_{k} = a_{k} / b_{k} \\) with \\( a_{k} \\) and \\( b_{k} \\) coprime. Then they write the recurrence relations as \\( a_{k + 1} = a_k^2 +b_k^2 -a_kb_k \\) and \\( b_{k + 1} = 3a_kb_k \\)." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.588, + 0.867, + 0.62 + ], + "angle": 0, + "content": "Wait, let's verify that. Starting from \\(x_{k + 1} = (1 / 3)(x_k + 1 / x_k - 1)\\) . If \\(x_{k} = a_{k} / b_{k}\\) , then \\(1 / x_{k} = b_{k} / a_{k}\\) So \\(x_{k} + 1 / x_{k} = (a_{k}^{2} + b_{k}^{2}) / (a_{k}b_{k})\\) . Okay, so step 1 seems correct. [Forward checking]" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.626, + 0.149, + 0.632 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.633, + 0.867, + 0.664 + ], + "angle": 0, + "content": "Wait let me confirm with \\( S_{3} \\): \\( a_{3} = a_{2}^{2} + b_{2}^{2} - a_{2}b_{2} \\). Wait \\( a_{2} = 471 \\), \\( b_{2} = 825 \\). 
So \\( a_{3} = 471^{2} + 825^{2} - 471*825 \\). [Backward checking]" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.671, + 0.203, + 0.692 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.693, + 0.865, + 0.724 + ], + "angle": 0, + "content": "The solution correctly arrives at the conclusion despite some initial confusion in their explanation. They properly computed each part modulo 1000, leading to the correct final result." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.725, + 0.206, + 0.737 + ], + "angle": 0, + "content": "Answer: 1" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.767, + 0.885, + 0.809 + ], + "angle": 0, + "content": "Table 2 A case of verification on hard problems. The verification process contains both forward checking, i.e., checking if each step is correct and backward checking, i.e., checking if a conclusion matches constraints, as is shown in the position of the red texts." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.964, + 0.504, + 0.974 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.125, + 0.441, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.443, + 0.125, + 0.882, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.365, + 0.885, + 0.45 + ], + "angle": 0, + "content": "Figure 4 The inference-time scaling of problem solving with Heimdall. The two figures show the accuracy on AIME datasets as the number of solutions scales up. Left: the problem solving accuracy on AIME2025 dataset scales with the number of solutions. The colored shaded area represents the area covered by the accuracy curves of a selection algorithm as the number of verifications increases from 1 to 64. Right: the contour map of the accuracy of Pessimistic Verification as the number of solutions (x-axis) and the number of verifications (y-axis) increase. The red curve indicates the optimal configurations within various overall compute budgets." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.476, + 0.884, + 0.583 + ], + "angle": 0, + "content": "time scaling in two dimensions, i.e., the number of solutions \\( N \\) and the number of verifications on each solution \\( M \\). Considering the huge computational cost, we only evaluate on AIME2025, with \\( N \\in [2, 256] \\) and \\( M \\in [1, 64] \\). Similar to the analysis of verification accuracy, we first sample \\( N = 256 \\) solutions for each problem and \\( M = 64 \\) verifications for each solution as the complete data and then randomly select a subset to evaluate each scaling algorithm under a specific compute budget \\( (M, N) \\). We repeat the sampling for 2048 time to get a stable average score. We set \\( \\alpha = 0.1 \\) in Pessimistic Verification and also evaluate the other three selection algorithms as follows." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.599, + 0.884, + 0.661 + ], + "angle": 0, + "content": "Majority Voting Majority voting is one of the most commonly used inference time scaling methods. It first categorize the solutions, e.g., by the final answers for math problems. It simply selects the category that contains the largest number of solutions in it. As fore-mentioned, majority voting can be seen as a special case of Pessimistic verification, where \\(\\alpha\\) is large enough to overshadow the signal of verification." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.677, + 0.884, + 0.724 + ], + "angle": 0, + "content": "Shortest Majority Voting. The recent work [31] observes a length bias that the correct solutions are often shorter than incorrect ones for the same questions. Suppose the answer \\(a_i\\) occurs \\(c_i\\) times in the sampled responses and the average length of responses with the answer \\(a_i\\) is \\(l_i\\), the voting score for \\(a_i\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.47, + 0.733, + 0.528, + 0.761 + ], + "angle": 0, + "content": "\\[\ns _ {i} = \\frac {c _ {i}}{l _ {i}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.779, + 0.885, + 0.856 + ], + "angle": 0, + "content": "Sampling-based Search. The work [33] leverages a commercial LLM as the verifier, and scales the inference-time computation on the number of sampled solutions and the number of verifications. During the selection, it calculates the average verification score of each solution and selects the solution with the largest score. Note that it does not group the solutions based on their answers, which is different from the special case of Pessimistic verification where \\(\\alpha\\) equals zero." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.863, + 0.884, + 0.924 + ], + "angle": 0, + "content": "Tie-breaking rules. The selection algorithm may encounter a tie situation, where multiple options have the same score. In principle, one can introduce another model to compare which option is better, but for simplicity, we leverage the length prior to break the tie, namely, selecting the option with the shortest average solution length." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.964, + 0.504, + 0.974 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.124, + 0.888, + 0.381 + ], + "angle": 0, + "content": "The left of Figure 4 shows how the accuracy of different scaling algorithms changes with the number of solutions \\( N \\). Majority voting are the worst among all scaling algorithms under the same \\( N \\). By employing the length prior, Shortest Majority Voting gives a better accuracy when \\( N \\) is small, but finally converges to \\( 70\\% \\), the same as that of Majority Voting. The red and blue areas represent the areas covered by the group of accuracy curves with \\( M \\in [1,64] \\) of Sampling-based Search and Pessimistic Verification respectively. As the figure shows, with the help of verification, the accuracy is significantly improved. In addition, Pessimistic Verification is better than Sampling-based Search when \\( M \\) is small. The reason is that the verification process is inherently probabilities. Even for a easy task, it is still possible that a wrong solution is judged as correct and is finally selected as the final solution. With the second term in Equation 1, Pessimistic Verification penalizes such uncertain cases and favors those with more visits. As \\( M \\) goes larger, the verification scores stabilize and the second term becomes smaller, and the gap between the two algorithms gets smaller. Interestingly, when \\( N \\) is large, the gap is large again. By checking the typical cases, we find that it comes from the grouping of solutions. Pessimistic Verification aggregate the solutions with their answers while Sampling-based Search treats each solution independently. Two solutions may have the same final answer, but their approaches or expressions can be entirely different. 
The aggregation takes this variance into consideration, so makes more robust selection. Note that such grouping is not generally applicable, e.g., grouping the solutions of proof problems is not straightforward. In those cases, we expect the two algorithm converges to the same limit." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.389, + 0.888, + 0.51 + ], + "angle": 0, + "content": "Taking the accuracy of Pessimistic Verification as a function of \\( M \\) and \\( N \\), we draw the contour map in the right of Figure 4. \\( M = 0 \\) represents the vanilla Majority Voting without verifications. We can see that increasing either \\( N \\) or \\( M \\) improves the performance. Each point \\( (M, N) \\) involves \\( M \\) responses by the solver model and \\( M \\times N \\) responses by the verifier, which is \\( M \\times (N + 1) \\) responses in total. By minimizing the overall budget, we derive the compute-optimal configurations for different compute budgets, with the constraint that \\( M \\) and \\( N \\) are non-negative integers. As is shown in the figure, we need to alternately increase \\( N \\) and \\( M \\), but we should increase \\( N \\) more frequently. The reason is that the correct answer for a hard problem is sparse. To get the problem solved with a scaling algorithm, we first need to give a sufficient budget to get the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.517, + 0.888, + 0.669 + ], + "angle": 0, + "content": "Remaining room for improvement. Can Heimdall be better? The black dashed curve in the left of Figure 4 is Best-of-N that selects the response that equals the ground-truth answer, which is the upper-limit of any scaling algorithms. When \\( N \\) is small, Pessimistic Verification Pessimistic Verification performs near the upper limit, but the gap widens as \\( N \\) increases. Consider the configuration \\( M = 256 \\), \\( N = 64 \\). Pessimistic Verification gets a score of \\( 83.3\\% \\) and the upper limit is \\( 93.3\\% \\), so the gap is 3 problems. Looking into the individual problems, we find that there are 4 problems that have only one correct solution among the 256 solutions. Heimdall manages to identify the correct solution on one of them, which is the case fore-mentioned in Table 3, but fails on the other three problems. The failed three problems involve spatial reasoning, which the base model of Heimdall is not very skilled at. We believe that as the ability of the base model becomes better, the verification ability can reach the upper limit." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.676, + 0.888, + 0.858 + ], + "angle": 0, + "content": "Coordination with other solver models. In the previous experiments, we use DeepSeek-R1-Distill-Qwen-32B as the solver model, which is the model to collect data during the training phase. To test Heimdall's generalization to other solver models, we test on DeepSeek-R1 and Gemini 2.5 Pro. For DeepSeek-R1, we directly extract the summary in its response, while for Gemini 2.5 Pro, we leverage another LLM to summarize its solution because we observe that its responses contains many reflections, which might confuse the verification process. Due to the limited compute budget, we set the total number of verifications to 16 and the total number of solving attempts to 64. We find that the verification accuracy on DeepSeek-R1 and Gemini 2.5 Pro is \\(90.1\\%\\) and \\(89.9\\%\\), respectively, close to that on DeepSeek-R1-Distill-Qwen-32B. 
The scaling in problem-solving, as is shown in the right of Figure 1, indicates that Heimdall consistently improves the accuracy with various solver models and using stronger solver models results in higher accuracy. Meanwhile, the improvements for Gemini 2.5 Pro is smaller compared to those of the other models. The reason is that the accuracy is already high on AIME2025 and the base model of Heimdall inherently lacks some ability on certain problems." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.873, + 0.527, + 0.891 + ], + "angle": 0, + "content": "5 Generalization to math proof problems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.903, + 0.888, + 0.934 + ], + "angle": 0, + "content": "Heimdall is trained on math problems with explicit answers because it is easy to determine the correctness of the solution. However, there are many other types of problems, some of which might be more challenging." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.963, + 0.509, + 0.975 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.13, + 0.26, + 0.867, + 0.306 + ], + "angle": 0, + "content": "Here is a math problem and a solution of it. Think step by step and verify if each proof step in solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the solution is correct and 0 if incorrect." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.319, + 0.227, + 0.333 + ], + "angle": 0, + "content": "**Problem**" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.335, + 0.865, + 0.365 + ], + "angle": 0, + "content": "The n players of a hockey team gather to select their team captain. Initially, they stand in a circle, and each person votes for the person on their left." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.366, + 0.867, + 0.472 + ], + "angle": 0, + "content": "The players will update their votes via a series of rounds. In one round, each player updates their vote, one at a time, according to the following procedure: At the time of the update, if a is voting for b, and b is voting for c, then a updates their vote to c. (Note that a, b, and c need not be distinct; if \\( \\mathrm{b} = \\mathrm{c} \\), then a's vote does not change for this update.) Every player updates their vote exactly once in each round, in an order determined by the players (possibly different across different rounds). They repeat this updating procedure for n rounds. Prove that at this time, all n players will unanimously vote for the same person." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.485, + 0.226, + 0.516 + ], + "angle": 0, + "content": "\\*\\*Solution\\*\\* ..." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.53, + 0.248, + 0.56 + ], + "angle": 0, + "content": "**Verification**" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.571, + 0.865, + 0.636 + ], + "angle": 0, + "content": "Wait, but since the solution didn't present this reasoning, their proof is flawed. They presented an incorrect model of the process, leading to an overestimation of the rounds needed, but the conclusion is still correct. However, the problem is to verify if each step in their solution is correct. Since their step 4's reasoning is wrong, their proof is incorrect." 
+ }, + { + "type": "text", + "bbox": [ + 0.13, + 0.642, + 0.204, + 0.664 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.665, + 0.865, + 0.724 + ], + "angle": 0, + "content": "The solution's step 4 incorrectly assumes that each person's vote jumps \\( n \\) times, leading to covering the entire ring. The correct approach is to note that each round reduces the number of distinct targets by one, leading to convergence in \\( n-1 \\) rounds. Thus, while the conclusion is correct, the reasoning in step 4 is flawed." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.727, + 0.208, + 0.74 + ], + "angle": 0, + "content": "Answer: 0" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.769, + 0.885, + 0.813 + ], + "angle": 0, + "content": "Table 3 A case of verification on the math proof problems. The problem is P1 in Canadian Mathematical Olympiad 2025. We modify the prompt template to check the proof process rather than the final answer, which is the red text. Heimdall checks the proof step by step and finds that the solution lacks rigorous proofs in step 4." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.963, + 0.508, + 0.975 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.124, + 0.888, + 0.352 + ], + "angle": 0, + "content": "In this section, we would like to test Heimdall's capability in verifying mathematical proof problems. We select 10 proof problems from Mathematics Olympiad of different countries from the years 2024 and 2025, and leverage a solver model, i.e., DeepSeek-R1-Distill-Qwen-32B, to generate a proof process for each problem. Considering that the solver model is not good at spatial reasoning, we do not select geometry-related problems. Heimdall is then employed to check the correctness of each proof. Finally, we have experts evaluate both the proof processes and Heimdall's verifications. The solver model correctly solves 2 problems, while the remaining 8 are incorrect. To our surprise, Heimdall correctly judges 9/10 cases, identifying 2/2 correct proofs and detecting issues in 7/8 incorrect proofs. There is 1 problem where Heimdall fail to identify the error in the proof, resulting in a false-negative judgment. Looking into the specific cases, we find that Heimdall judges the correctness with both forward and backward checking, e.g., checking each step and testify with examples. It is capable of identifying most errors or unproven assumptions in the solution, but for some subtle problems, e.g., the assumption does not appear in the form of a proposition but is implicitly assumed during the proof process, Heimdall might fail. We believe that introducing the proof data in the RL training would improve the performance of Heimdall and an important direction in the future would be how to generate the dataset in large scale." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.368, + 0.609, + 0.386 + ], + "angle": 0, + "content": "6 Verification on automatic knowledge discovery" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.397, + 0.888, + 0.579 + ], + "angle": 0, + "content": "In the process of human exploration of the unknown, some scientists pose questions, some propose solutions to these questions, and others focus on verifying the correctness of solutions provided by their predecessors. Verification itself, as a crucial part of knowledge discovery, ensures the correctness of new knowledge. 
In this section, we design a prototype that simulates the stages of posing questions and solving them, using the synthesis of math problems by NuminaMath [12], to evaluate Heimdall's effectiveness in detecting problematic knowledge. NuminaMath open-sources a comprehensive collection of 860,000 pairs of math problems and reference solutions. It includes 229,982 MATH-level pairs and 62,108 AMC-AIME-level pairs that are synthesized from seed problems in MATH and AMC-AIME training dataset. We test Heimdall on the harder one, i.e., the AMC-AIME dataset. Flaws can exist either within the problem itself, such as an unsolvable problem, or within the solution provided. Theoretically, by checking if the solution satisfies all the requirements in the problem, Heimdall can detect both flaws. Meanwhile, the task also indirectly tests the generalization capabilities of Heimdall, as the qualities of the problems in the training set is much higher." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.586, + 0.473, + 0.917 + ], + "angle": 0, + "content": "We randomly sample 8,192 pairs of questions and solutions as the test set. For each pair, we construct the prompt and query Heimdall 8 times. We calculate the sum of the verification scores, which ranges from 0 to 8 and illustrate the distribution in Figure 5. As is shown, near a half of the data is labeled incorrect with a high confidence, which is consistent with the experience listed in the latest NuminaMath-1.5 website [13] that by the ablation study, the authors find that this dataset hurts a bit the performance and plan to remove all synthetic data until they find a way to reliably generate high-quality synthetic problems. To measure the verification correctness, we randomly select 10 cases in the 0-scored group and manually check their correctness. We find that for all cases, the judgments by Heimdall are correct. Table 4 is a case of the problematic data and the verification. Due to space limitations, we retain only the essential information, with the rest omitted. The question does not have the correct answer among its options. Meanwhile, the solution mistakenly" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.605, + 0.882, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.837, + 0.887, + 0.893 + ], + "angle": 0, + "content": "Figure 5 The distribution of verification scores on the problems of a synthetic dataset. The x-axis is the sum of scores across 8 verifications and the y-axis is the number of problems corresponding to each sum." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.918, + 0.884, + 0.933 + ], + "angle": 0, + "content": "uses an incorrect value in the equation and interestingly arrives at one of the given options. We hypothesize" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.963, + 0.509, + 0.975 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.131, + 0.214, + 0.194, + 0.229 + ], + "angle": 0, + "content": "Prompt:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.242, + 0.869, + 0.274 + ], + "angle": 0, + "content": "Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. ..." 
+ }, + { + "type": "text", + "bbox": [ + 0.129, + 0.287, + 0.227, + 0.301 + ], + "angle": 0, + "content": "**Problem**" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.302, + 0.869, + 0.377 + ], + "angle": 0, + "content": "A rabbit starts jumping from the point \\((2,3)\\) on a rectangular board that extends from \\((0,0)\\) to \\((6,5)\\). Each jump it makes is either left, right, up, or down, and has a length of 1 unit. Each jump direction is uniformly and randomly chosen. The jumping sequence continues until the rabbit reaches any border of the rectangle. Calculate the probability that the rabbit's sequence of jumps ends on the top or bottom side of the rectangle." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.378, + 0.365, + 0.395 + ], + "angle": 0, + "content": "(A). \\(\\frac{1}{4}\\) (B). \\(\\frac{17}{32}\\) (C). \\(\\frac{17}{24}\\) (D). \\(\\frac{1}{3}\\) (E). \\(\\frac{3}{4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.407, + 0.225, + 0.421 + ], + "angle": 0, + "content": "**Solution**" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.422, + 0.868, + 0.467 + ], + "angle": 0, + "content": "Let \\( P_{(x,y)} \\) denote the probability of the rabbit ending on the top or bottom side starting from \\( (x,y) \\). Using symmetry about the center line \\( y = 2.5 \\) and \\( x = 3 \\), and recursive relations, we analyze the movements." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.475, + 0.15, + 0.482 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.483, + 0.865, + 0.514 + ], + "angle": 0, + "content": "Solving Equations: We solve this using recursive approaches and considering border cases explicitly. Assuming symmetry simplifies \\( P_{(3,2)} = P_{(3,3)} \\)." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.514, + 0.697, + 0.529 + ], + "angle": 0, + "content": "Here, let's consider only two key points \\((2,3)\\) and \\((3,3)\\) for simplicity. Solving:" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.539, + 0.655, + 0.568 + ], + "angle": 0, + "content": "\\[\nP _ {(2, 3)} = \\frac {1}{4} P _ {(2, 2)} + \\frac {1}{4} (1) + \\frac {1}{4} P _ {(1, 3)} + \\frac {1}{4} P _ {(3, 3)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.583, + 0.15, + 0.589 + ], + "angle": 0, + "content": "··" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.589, + 0.868, + 0.643 + ], + "angle": 0, + "content": "Thus, the rabbit's sequence of jumps ends on the top or bottom side with a probability of \\(\\frac{17}{24}\\). The final answer is (C) \\(\\boxed{\\frac{17}{24}}\\)" + }, + { + "type": "title", + "bbox": [ + 0.131, + 0.653, + 0.208, + 0.667 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.681, + 0.197, + 0.696 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.705, + 0.15, + 0.712 + ], + "angle": 0, + "content": "···" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.713, + 0.203, + 0.726 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.727, + 0.868, + 0.787 + ], + "angle": 0, + "content": "The solution provided incorrectly assumes that \\( \\mathrm{P}(2,4) = 1 \\), which is not true since (2,4) is not a top or bottom border. 
They oversimplified the equations by not accounting for the correct dependencies of other points like \\( \\mathrm{P}(2,4) \\) and mishandled the symmetry in y-direction due to different boundary conditions \\( (\\mathrm{P} = 0 \\) at \\( \\mathrm{y} = 0 \\) and \\( \\mathrm{P} = 1 \\) at \\( \\mathrm{y} = 5 \\))." + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.788, + 0.208, + 0.801 + ], + "angle": 0, + "content": "Answer: 0" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.83, + 0.886, + 0.86 + ], + "angle": 0, + "content": "Table 4 An example of problematic synthetic data and the verification. The verification correctly points out the problem in the solution." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.963, + 0.51, + 0.975 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.125, + 0.887, + 0.157 + ], + "angle": 0, + "content": "that the task is somewhat challenging for the LLM used for synthetic data generation, leading the LLM to hallucinate a superficially consistent but wrong output." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.171, + 0.27, + 0.187 + ], + "angle": 0, + "content": "7 Future Work" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.2, + 0.89, + 0.383 + ], + "angle": 0, + "content": "Firstly, the verification dataset is formed by simply extracting the summary part of a reasoning model, which as we observed is sometimes overly brief, e.g., driving to an answer with only one sentence. A more detailed explanation would make the verification easier. One can further improve the verification accuracy by summarizing the reasoning process with another LLM. Secondly, we mainly evaluate the verification ability on math problems that have final answers. There are many other types of tasks, e.g., coding problems and Mathematical proof problems. Although the learned ability is generalizable to other domains, we expect it beneficial to train with data in other domains. For example, in the context of coding problems, backward checking may take the alternative form of designing test cases. Lastly, we only prototype the usage of Heimdall in the automatic knowledge discovery. In real scenarios, posing valuable questions is a challenging task that demands both curiosity and keen insight. Such ability is often the critical part of the scientific discovery, which however is seldom investigated. We believe that as the general capabilities of LLM continues to advance, this direction will become more and more important." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.398, + 0.254, + 0.415 + ], + "angle": 0, + "content": "8 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.428, + 0.889, + 0.551 + ], + "angle": 0, + "content": "In this paper, we propose to train a long CoT verifier called Heimdall with reinforcement learning. On the competitive math problems, Heimdall achieves high accuracy and scales well along both the length of reasoning chains and the number of repeated generation. Through human evaluation, we find that Heimdall also shows impressive generalization ability on out-of-domain problems, such as math proofs. We further propose the inference time scaling algorithm called Pessimistic Verification, which incorporates a solver and Heimdall for problem solving. By scaling up the compute, we can achieve the performance comparable to top-tier models on challenging math problems. 
Lastly, we design a prototype of automatic knowledge discovery and demonstrate that Heimdall can reliably detect flaws in the synthetic data from another LLM." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.565, + 0.33, + 0.584 + ], + "angle": 0, + "content": "9 Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.594, + 0.887, + 0.626 + ], + "angle": 0, + "content": "We thank the data annotation team for their expertise on collecting the evaluation data and analyzing the verification outputs, including Bocheng Zhou, Weijian Zhao, Tong Sun and Zhiyuan Zhang." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.963, + 0.511, + 0.975 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.122, + 0.225, + 0.137 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.151, + 0.889, + 0.192 + ], + "angle": 0, + "content": "[1] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.199, + 0.888, + 0.243 + ], + "angle": 0, + "content": "[2] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.249, + 0.888, + 0.291 + ], + "angle": 0, + "content": "[3] Google. Gemini 2.5: Our most intelligent ai model, 2025. URL https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking. Accessed: 2025-03-25." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.298, + 0.888, + 0.314 + ], + "angle": 0, + "content": "[4] Grok. Grok 3 beta — the age of reasoning agents, 2025. URL https://x.ai/news/grok-3. Accessed: 2025-02-19." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.32, + 0.888, + 0.35 + ], + "angle": 0, + "content": "[5] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, et al. A survey on lmm-as-a-judge. arXiv preprint arXiv:2411.15594, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.355, + 0.888, + 0.398 + ], + "angle": 0, + "content": "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.404, + 0.888, + 0.434 + ], + "angle": 0, + "content": "[7] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.439, + 0.888, + 0.469 + ], + "angle": 0, + "content": "[8] Minki Kang, Jongwon Jeong, and Jaewoong Cho. T1: Tool-integrated self-verification for test-time compute scaling in small language models, 2025. URL https://arxiv.org/abs/2504.04718." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.474, + 0.888, + 0.516 + ], + "angle": 0, + "content": "[9] Tian Lan, Wenwei Zhang, Chengqi Lyu, Shuaibin Li, Chen Xu, Heyan Huang, Dahua Lin, Xian-Ling Mao, and Kai Chen. Training language models to critique with multi-agent feedback. arXiv preprint arXiv:2410.15287, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.524, + 0.888, + 0.566 + ], + "angle": 0, + "content": "[10] Tian Lan, Wenwei Zhang, Chen Xu, Heyan Huang, Dahua Lin, Kai Chen, and Xian-Ling Mao. Criticeval: Evaluating large-scale language model as critic. Advances in Neural Information Processing Systems, 37:66907-66960, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.572, + 0.888, + 0.615 + ], + "angle": 0, + "content": "[11] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.622, + 0.888, + 0.665 + ], + "angle": 0, + "content": "[12] Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions. Hugging Face repository, 13:9, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.67, + 0.888, + 0.728 + ], + "angle": 0, + "content": "[13] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-1.5](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.733, + 0.888, + 0.763 + ], + "angle": 0, + "content": "[14] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. Criticbench: Benchmarking llms for critique-correct reasoning. arXiv preprint arXiv:2402.14809, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.769, + 0.888, + 0.798 + ], + "angle": 0, + "content": "[15] Liangchen Luo, Zi Lin, Yinxiao Liu, Lei Shu, Yun Zhu, Jingbo Shang, and Lei Meng. Critique ability of large language models. arXiv preprint arXiv:2310.04815, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.804, + 0.888, + 0.845 + ], + "angle": 0, + "content": "[16] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. S²r: Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.853, + 0.888, + 0.883 + ], + "angle": 0, + "content": "[17] Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.888, + 0.888, + 0.918 + ], + "angle": 0, + "content": "[18] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.113, + 0.151, + 0.889, + 0.918 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.963, + 0.509, + 0.975 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.124, + 0.889, + 0.169 + ], + "angle": 0, + "content": "[19] Qian Pan, Zahra Ashktorab, Michael Desmond, Martin Santillan Cooper, James Johnson, Rahul Nair, Elizabeth Daly, and Werner Geyer. Human-centered design recommendations for lmm-as-a-judge. arXiv preprint arXiv:2407.03479, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.173, + 0.888, + 0.204 + ], + "angle": 0, + "content": "[20] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.209, + 0.887, + 0.24 + ], + "angle": 0, + "content": "[21] Guijin Son, Hyunwoo Ko, Hoyoung Lee, Yewon Kim, and Seunghyeok Hong. Llm-as-a-judge & reward model: What they can and cannot do. arXiv preprint arXiv:2409.11239, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.244, + 0.888, + 0.288 + ], + "angle": 0, + "content": "[22] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.293, + 0.828, + 0.31 + ], + "angle": 0, + "content": "[23] Rich Sutton. Verification, the key to ai. URL http://incompleteideas.net/IncIdeas/KeytoAI.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.315, + 0.889, + 0.358 + ], + "angle": 0, + "content": "[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.363, + 0.885, + 0.395 + ], + "angle": 0, + "content": "[25] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.399, + 0.885, + 0.429 + ], + "angle": 0, + "content": "[26] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.434, + 0.885, + 0.478 + ], + "angle": 0, + "content": "[27] Jiayi Ye, Yanbo Wang, Yue Huang, Dongping Chen, Qihui Zhang, Nuno Moniz, Tian Gao, Werner Geyer, Chao Huang, Pin-Yu Chen, et al. Justice or prejudice? quantifying biases in llm-as-a-judge. arXiv preprint arXiv:2410.02736, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.483, + 0.885, + 0.526 + ], + "angle": 0, + "content": "[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.532, + 0.885, + 0.576 + ], + "angle": 0, + "content": "[29] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.581, + 0.885, + 0.625 + ], + "angle": 0, + "content": "[30] Wojciech Zaremba, Evgenia Nitishinskaya, Boaz Barak, Stephanie Lin, Sam Toyer, Yaodong Yu, Rachel Dias, Eric Wallace, Kai Xiao, Johannes Heidecke, et al. Trading inference-time compute for adversarial robustness. arXiv preprint arXiv:2501.18841, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.63, + 0.885, + 0.661 + ], + "angle": 0, + "content": "[31] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.665, + 0.885, + 0.696 + ], + "angle": 0, + "content": "[32] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.7, + 0.885, + 0.731 + ], + "angle": 0, + "content": "[33] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.736, + 0.885, + 0.78 + ], + "angle": 0, + "content": "[34] Jian Zhao, Runze Liu, Kaiyan Zhang, Zhimu Zhou, Junqi Gao, Dong Li, Jiafei Lyu, Zhouyi Qian, Biqing Qi, Xiu Li, et al. Genprm: Scaling test-time compute of process reward models via generative reasoning. arXiv preprint arXiv:2504.00891, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.785, + 0.885, + 0.815 + ], + "angle": 0, + "content": "[35] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.111, + 0.124, + 0.889, + 0.815 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.964, + 0.509, + 0.975 + ], + "angle": 0, + "content": "16" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_origin.pdf b/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..71bb8baf45c5e41b4fe5ce5137a692e090f892e5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/2d1c700e-c8fa-4c7f-b020-a943b3c7241d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1700a8e64fcf4f725d3cf9a423556ae06fb3134eb40f55cb49f18addb3a07b6 +size 553025 diff --git a/data/2025/2504_10xxx/2504.10337/full.md b/data/2025/2504_10xxx/2504.10337/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7747f0f846f694eec11504063a70b4175e6db34b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/full.md @@ -0,0 +1,344 @@ +# Heimdall: test-time scaling on the generative verification + +Wenlei Shi, Xing Jin + +ByteDance Seed + +# Abstract + +An AI system can create and maintain knowledge only to the extent that it can verify that knowledge itself [23]. Recent work on long Chain-of-Thought reasoning has demonstrated great potential of LLMs on solving competitive problems, but their verification ability remains to be weak and not sufficiently investigated. In this paper, we propose Heimdall, the long CoT verification LLM that can accurately judge the correctness of solutions. With pure reinforcement learning, we boost the verification accuracy from $62.5\%$ to $94.5\%$ on competitive math problems. By scaling with repeated sampling, the accuracy further increases to $97.5\%$ . Through human evaluation, Heimdall demonstrates impressive generalization capabilities, successfully detecting most issues in challenging math proofs, the type of which is not included during training. Furthermore, we propose Pessimistic Verification to extend the functionality of Heimdall to scaling up the problem solving. It calls Heimdall to judge the solutions from a solver model and based on the pessimistic principle, selects the most likely correct solution with the least uncertainty. Taking DeepSeek-R1-Distill-Qwen-32B as the solver model, Pessimistic Verification improves the solution accuracy on AIME2025 from $54.2\%$ to $70.0\%$ with $16\times$ compute budget and to $83.3\%$ with more compute budget. With the stronger solver Gemini 2.5 Pro, the score reaches $93.0\%$ . Finally, we prototype an automatic knowledge discovery system, a ternary system where one poses questions, another provides solutions, and the third verifies the solutions. Using the data synthesis work NuminaMath [13] for the first two components, Heimdall effectively identifies problematic records within the dataset and reveals that nearly half of the data is flawed, which interestingly aligns with the recent ablation studies from NuminaMath. + +Date: April 17, 2025 + +Correspondence: Wenlei Shi at wenlei.shi@bytedance.com, Xing Jin at jinxing.9@bytedance.com + +# 1 Introduction + +In the realm of scientific and mathematical discovery, the process of logistic verification and validation is as crucial as the initial act of problem-solving. 
One of the most illustrative examples of this principle can be found in the famous thought experiment 'chasing a beam of light' by Albert Einstein, where he found a paradox within the established physics theories and further formulated the principle of the constancy of the speed of light, a cornerstone of his Special Theory of Relativity. Recently, the problem solving ability of LLMs has been significantly improved. With the long Chain of Thought (CoT) reasoning, advanced LLMs are now able to effectively solve complex competition-level problems in both math and code domains. However, the verification ability of LLMs has not been sufficiently investigated. On one hand, although the intelligence of general-purpose LLMs increases rapidly with the long CoT capabilities, we find that current SOTA models with direct prompting [5, 21] are not good at verification on complex problems, e.g., o1-mini only achieves 80.9% on our evaluation dataset. On the other hand, some work [16, 17, 22, 25, 29, 32, 35] trains a dedicated model for verification or critique, but high-quality verification data is hard to collect, which limits the verification capability and hence impedes the application to challenging problems.

![](images/0191c427ea854db6b0b3df2857a2ba0d5b3133bacabb267a56124a118ed19753.jpg)

![](images/85f80f96d11583bcc91e763e2173e36cf0836b619f69f65fa73aa939cc41a9b8.jpg)

![](images/50ba8d1f5ebc6b794706a38186df025e695a0e378c116cc3e91b5a1a9fbd269e.jpg)

Figure 1 Scaling of Heimdall. Left: the verification accuracy scales with the response length during RL training. With more reasoning tokens, Heimdall gives more accurate judgments on the solutions in AIME2024. Middle: the verification accuracy scales with repeated sampling and Majority Voting. By sampling multiple verification trajectories and voting, the accuracy can be further improved. Right: with Heimdall scoring the solutions on AIME2025, the problem solving accuracy scales with the number of solutions. We verify 16 times on each solution and select the most likely correct one with Pessimistic Verification $(\times 16)$. When paired with various solver models, Heimdall gives significant improvements over pure solver-based Majority Voting (MV).

In this paper, we claim that verifying if a solution is correct is a special type of problem solving, i.e., a true/false question that involves step-by-step judgment on the solution. Inspired by the recent progress on long CoT reasoning, we propose to train a long CoT verifier through reinforcement learning. We name it Heimdall, symbolizing its sharp ability to detect errors and safeguard the correctness of knowledge. We leverage the PPO [20] algorithm and find that the data processing is critical to the RL training. Specifically, two types of problems hinder the optimization, i.e., easy problems with only correct solutions and hard problems with only wrong solutions, both of which lack contrastive examples and tend to guide the verifier to simply identify the hardness of a problem, rather than finding the wrong position in the solution. By filtering out the two cases, the model learns the verification ability more effectively. Taking competitive math problems as our primary experimental domain, we show that the verification ability follows the test-time scaling law where the accuracy improves significantly from $62.5\%$ to $94.5\%$ as the response length grows, as is shown in the left of Figure 1. The performance can be further improved by sampling multiple verifications and voting on the judgment results. In the middle of Figure 1, the accuracy grows from $94.5\%$ to $97.5\%$ on AIME2024 as the number of verifications grows from 2 to 64. Furthermore, the evaluation from human experts shows that Heimdall generalizes well on math proof problems although it is trained only on calculation problems with explicit answers.
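To make the verification voting described above concrete, the following minimal Python sketch (our own illustration, not code from the paper; all identifiers are hypothetical) aggregates repeated binary verification verdicts for a single solution by majority voting. The tie-breaking rule toward "incorrect" is an assumption of this sketch.

```python
from collections import Counter

def majority_verdict(verdicts: list[int]) -> int:
    """Aggregate repeated binary verification verdicts (1 = correct, 0 = incorrect)
    for one solution into a single judgment by majority voting.

    Ties are resolved toward 0 (reject), which is an assumption of this sketch.
    """
    counts = Counter(verdicts)
    return 1 if counts[1] > counts[0] else 0

# Example: 5 sampled verification trajectories for the same solution
print(majority_verdict([1, 1, 0, 1, 0]))  # -> 1
```

Sampling more verdicts per solution before voting is exactly the inference-time scaling knob evaluated later in the paper.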
In addition, we extend the usage of Heimdall to scale up problem solving. Suppose the solver model gives multiple solutions to a problem and Heimdall judges the correctness of each solution multiple times. We can then select the best solution based on the verification results. We frame the selection process as a multi-arm bandit problem where solutions with the same conclusion are treated as multiple visits to the same 'arm'. Based on the pessimism principle, we propose the solution selection algorithm called Pessimistic Verification, which minimizes the uncertainty of selecting wrong solutions. The algorithm unifies Majority Voting and reward-model-based Best-of-N by balancing the contributions of the solver and the verifier, and empirically demonstrates better scaling than both algorithms. Taking DeepSeek-R1-Distill-Qwen-32B [6] as the solver model, which scores 54 on AIME2025, Pessimistic Verification raises the score to 70 with $\times 16$ compute, matching the performance of o1, and to 83.3 with more compute. We further test with stronger solver models, including DeepSeek-R1 [6] and Gemini 2.5 Pro [3]. As is shown in the right of Figure 1, Pessimistic Verification with Heimdall consistently improves the problem solving of various models, and with Gemini 2.5 Pro, the accuracy on AIME2025 reaches $93\%$, matching the currently reported SOTA with multiple attempts by Grok3 [4].

Finally, we create a prototype to demonstrate the utility of Heimdall in automatic knowledge discovery. We use the math data synthesis work NuminaMath [12] as the procedure for automatically proposing new problems and the corresponding solutions, and call Heimdall to detect errors in the synthetic dataset. Human annotation demonstrates that Heimdall accurately identifies the errors in the dataset. The verification result also reveals that the quality of the current synthetic dataset is poor, which is consistent with the authors' finding that removing the dataset from training improves the performance of the solver model [13].

In summary, our contributions are as follows:

- We propose Heimdall, a long CoT verifier trained by reinforcement learning, and demonstrate superior accuracy compared to top-tier LLMs. Heimdall also shows good generalization ability on out-of-domain problems, such as math proof problems.
- We propose a unified algorithm called Pessimistic Verification for inference-time scaling on problem solving. Empirically, it scales better than vanilla Majority Voting or reward-model-based Best-of-N and achieves SOTA accuracy on AIME2025.
- We create a prototype to show the utility of Heimdall in autonomous knowledge discovery, where Heimdall is used to identify the correctness of problem-solution pairs synthesized by another LLM. Human evaluations show that Heimdall can effectively detect flaws in the synthetic data.

# 2 Related Work

Reasoning model. Reasoning models outperform previous general-purpose models on challenging reasoning tasks.
During the chain of thought (CoT) reasoning, they keep reflecting on their claims and searching for viable solutions, utilizing more compute budget and providing better and more robust results. OpenAI first released its reasoning models [2, 7, 30], which perform significantly better on competitive tests like AIME and CodeForces than its previous models. Work by DeepSeek [6] and Kimi [24] independently proposes different ways of using reinforcement learning to trigger the reflection and searching capability in their base models. Recently, Grok3 [4] and Gemini 2.5 Pro [3] also demonstrate impressive reasoning capabilities through long CoT.

Generative evaluation. Recently, some works have explored improving the verification ability of LLMs. Some [8, 29, 32, 34] explore finetuning an LLM with synthetic verification data to improve its verification ability. However, it is hard to synthesize high-quality data if the LLM inherently lacks verification skills. One related topic is LLM-as-a-Judge [5, 11], where an LLM is prompted to evaluate responses from other LLMs. The work [11] leverages strong LLMs as judges to evaluate other models in various domains and reveals that strong LLM judges have good generalization ability across domains. Some works design judge systems and analyze the judgment behavior [19, 21, 27]. However, prompting is only effective on easy tasks, and when it comes to competitive tests, general-purpose reasoning models do not perform well, as revealed by our test in Section 4. Another similar topic is critique [9, 10, 14, 15, 18], which often focuses on code and math problems and is used for giving suggestions for further revision. Critique fine-tuning [25] shows that fine-tuning on high-quality critique data is beneficial to the reasoning ability of a base model. Several works, e.g., CTRL [26], leverage RL to train LLMs. However, they do not leverage the long CoT ability, which limits the verification performance on complex reasoning problems.

# 3 Approach

We define the verification task as follows: we ask a model to judge in its CoT whether a solution to a problem is correct, and finally to put its judgment result at the end of the response. Table 1 is the template of the verification prompt and the expected format of a response.

Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the final answer in the solution is correct and 0 if incorrect.

```txt
**Problem**
${problem}
```

```txt
**Solution**
${solution}
```

Table 1 Prompt template for verification.

# 3.1 Reinforcement learning for verification

RL Setup. Let $\mathcal{D} = \{(p_i, s_i, y_i)\}_{i=1}^N$ be our dataset, where $p_i$ is a problem, $s_i$ is a solution to the problem, which may be the response from a reasoning model, and $y_i \in \{0, 1\}$ represents the correctness of the solution, with 1 indicating correctness and 0 indicating incorrectness. Given a triplet $(p_i, s_i, y_i)$, we prompt an LLM to check the correctness of the solution step-by-step and finally give a conclusion on the correctness, as is shown in Table 1. Denoting the prompt as $q_i$, the verifier model $\pi_\theta(z_i, y_i'|q_i)$ takes the prompt as input, generates the CoT $z_i$ judging the correctness of $s_i$, and at last gives a boolean conclusion $y_i'$ on whether $s_i$ is correct. The outcome reward function $R$ is as follows:

$$
R(y, y') = \left\{ \begin{array}{ll} 1 & y = y', \\ -1 & y \neq y'. \end{array} \right.
$$

Then the objective of RL is:

$$
\mathcal{J}(\theta) = \mathbf{E}_{(q, y) \sim \mathcal{D},\, (z, y') \sim \pi_{\theta}(q)} \left[ R(y, y') \right]
$$
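As a minimal illustration of how a training example could be assembled from the Table 1 template and scored with the outcome reward above, consider the following Python sketch. It is our own sketch rather than the paper's code: the template string layout, the function names, and the rule that an unparseable response counts as a wrong judgment are all assumptions.

```python
import re

# Assumed plain-text rendering of the Table 1 template.
PROMPT_TEMPLATE = (
    "Here is a math problem and a solution of it. Think step by step and verify if the final "
    "answer in the solution is correct. The last line of your response should be of the form "
    "Answer: $Answer (without quotes) where $Answer is 1 if the final answer in the solution "
    "is correct and 0 if incorrect.\n\n"
    "**Problem**\n{problem}\n\n"
    "**Solution**\n{solution}\n"
)

def build_verification_prompt(problem: str, solution: str) -> str:
    """Instantiate the verification prompt q_i for one (p_i, s_i) pair."""
    return PROMPT_TEMPLATE.format(problem=problem, solution=solution)

def outcome_reward(response: str, label: int) -> float:
    """Return R(y, y'): +1 if the parsed verdict matches the ground-truth label, else -1.

    The verdict y' is read from the trailing 'Answer: 0/1' line; a response that
    cannot be parsed is treated as a mismatch (an assumption of this sketch).
    """
    match = re.search(r"Answer:\s*([01])\s*$", response.strip())
    if match is None:
        return -1.0
    return 1.0 if int(match.group(1)) == label else -1.0
```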
The outcome reward function $R$ is as follows:

$$
R(y, y^{\prime}) = \left\{ \begin{array}{ll} 1 & y = y^{\prime}, \\ -1 & y \neq y^{\prime}. \end{array} \right.
$$

Then the objective of RL is

$$
\mathcal{J}(\theta) = \mathbf{E}_{(q, y) \sim \mathcal{D},\, (z, y^{\prime}) \sim \pi_{\theta}(q)} \left[ R(y, y^{\prime}) \right]
$$

We run the vanilla PPO algorithm on a reasoning model, and propose the following strategy for improvement.

Data collection and filtering. We collect the dataset $\mathcal{D}$ by prompting one or multiple reasoning models to solve problems. For every problem in the dataset, we collect multiple solutions and construct a verification prompt for each solution using the template in Table 1. However, two cases may hurt the RL training, i.e., extremely difficult problems, for which we fail to sample any correct solutions, and extremely easy problems, for which we fail to sample any wrong solutions. Such unbalanced data may teach the verifier to be biased by the difficulty of the problem, i.e., to be optimistic on easy problems and pessimistic on difficult problems. Therefore, we do not include the data from these two cases in the training dataset.

# 3.2 Solution selection by Pessimistic Verification

When tackling challenging problems, one can sample multiple solutions and leverage the verifier to identify the most likely correct one. By sampling verification responses multiple times, we can achieve more reliable judgments, thereby improving overall problem-solving performance. We propose a principled and flexible method for inference-time scaling along two dimensions, i.e., the number of solutions sampled from the solver model and the number of verifications sampled from the verifier model. Denote the number of solutions to a problem as $N$ and the number of verifications on each solution as $M$. We initially conceptualize the selection process as a multi-armed bandit problem, where each arm corresponds to a distinct answer, and each verification constitutes a visit to an arm. The reward is the verification result, which can be either 1 or 0. Each time the solver generates a solution, the arm representing the solution's answer receives $M$ visits and immediately accrues $M$ rewards. The straightforward approach is to calculate the average reward each arm receives as its score and greedily select the one with the highest score. However, for answers with few visits, the score fluctuates and can be unreliable. Following the pessimism principle in RL, we introduce a lower confidence bound, which adds an uncertainty penalty to the score. Let $r_0, r_1, \ldots, r_K$ be the average reward of each answer and $N_0, N_1, \ldots, N_K$ be the number of solutions that lead to each answer. The selection algorithm is defined as:

$$
\hat{a} := \arg\max_{a_i} \left( r(a_i) - \alpha \frac{\ln(NM)}{N_i M + 1} \right) \tag{1}
$$

where $\alpha$ is a hyper-parameter that balances the consideration of uncertainty in the decision-making process and $\ln$ is the natural logarithm. Intuitively, the first term reflects the signals from the verifier, while the second term accounts for the bias of the solver in the answer space.

- When $N_{i}$ is small, the second term dominates, which downweights the verification signal; in the extreme case, the rule collapses to Majority Voting.
- When $N_{i}$ is large, the first term becomes more important, and in the extreme case, the rule simply selects the answer with the best verification score.

The phase shift aligns with the fact that Majority Voting is trapped by the bias of the solver, for example when a wrong answer occurs more frequently than the correct one; as $N$ and $M$ grow large, the verification scores stabilize and we tend to trust them more, because verification is often easier than solving.
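The selection rule in Equation 1 can be implemented in a few lines. The sketch below is our own illustration of the formula (the data layout and function names are assumptions, not the paper's code): it groups sampled solutions by their final answer, averages the verification rewards per answer, and subtracts the uncertainty penalty before taking the argmax.

```python
import math
from collections import defaultdict

def pessimistic_verification(answers, verdicts, alpha=0.1):
    """Select an answer by Eq. (1): score(a_i) = r(a_i) - alpha * ln(N*M) / (N_i*M + 1).

    answers:  final answer of each sampled solution (length N).
    verdicts: verdicts[j] holds the M binary verification results
              (1 = judged correct) for solution j.
    """
    N = len(answers)
    M = len(verdicts[0]) if verdicts else 0
    rewards = defaultdict(list)   # answer -> all verification results for it
    counts = defaultdict(int)     # answer -> N_i, number of solutions with it
    for ans, v in zip(answers, verdicts):
        rewards[ans].extend(v)
        counts[ans] += 1

    def score(ans):
        r = sum(rewards[ans]) / len(rewards[ans])             # average reward r(a_i)
        penalty = alpha * math.log(N * M) / (counts[ans] * M + 1)
        return r - penalty                                    # lower confidence bound

    return max(rewards, key=score)
```

With a large $\alpha$ the penalty dominates and the rule behaves like Majority Voting; with $\alpha = 0$ it reduces to picking the answer with the best average verification score.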
# 4 Experiments

# 4.1 Dataset

Our experiments are on math problems. The training dataset comes from the AoPS website and official math competition homepages, similar to that of DAPO [28]. We leverage the DeepSeek-R1-Distill-Qwen-32B model as the policy model to generate 16 solutions for each problem. We leverage a rule-based program to check if the final answer in a solution is correct, which compares the reference answer of the problem with the answer in the solution and outputs the label, i.e., 1 for a correct response and 0 for an incorrect response. Then we construct the verification dataset with the prompt template in Table 1. To keep the prompt clean and short, we remove the reasoning part of each solution and only use the summary part.

We test the verification ability on both AIME2024 and AIME2025, 60 questions in total. During training, we monitor the performance on AIME2024 and select the best checkpoint as the final version of Heimdall. Therefore, one can treat AIME2024 as the validation dataset and AIME2025 as the test dataset.
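The construction of the verification training set combines this rule-based labeling with the filtering strategy of Section 3.1. The sketch below is a simplified illustration under assumed helper functions and data layout; none of these names come from the paper.

```python
def build_verification_dataset(problems, sample_solutions, answer_is_correct,
                               build_prompt, k=16):
    """Sample k solutions per problem, label them with a rule-based answer check,
    and drop problems whose sampled solutions are all correct or all wrong."""
    dataset = []
    for statement, reference_answer in problems:
        solutions = sample_solutions(statement, k)                  # k solver responses
        labels = [int(answer_is_correct(reference_answer, s)) for s in solutions]
        if sum(labels) in (0, len(labels)):                         # no contrastive pairs
            continue                                                # -> exclude the problem
        dataset += [(build_prompt(statement, s), y)
                    for s, y in zip(solutions, labels)]
    return dataset
```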
# 4.2 Scaling of verification

Figure 2 shows the accuracy and the length of response tokens during RL training. As depicted by the blue curve, both accuracy and response length increase with the number of training steps, albeit at different rates. At the early stage, the accuracy improves rapidly, but the response length fluctuates. This is because a minor adjustment to the policy can significantly boost accuracy. Later, the response length grows constantly, while the accuracy gradually converges to $94.5\%$, because the model is learning to tackle the hardest part of the training dataset, which requires increasingly more reasoning tokens. The red curve represents the RL training without the data filtering strategy, i.e., incorporating both extreme cases of difficult and easy problems in the dataset. As training progresses, the performance gap becomes more pronounced, indicating that the absence of contrastive examples detrimentally impacts performance. In addition, we test o1-mini with the same evaluation data, which is shown as the dashed line in the left of Figure 2. Our model outperforms o1-mini in fewer than 20 steps, indicating substantial potential for enhancing the verification capabilities of general-purpose reasoning models.

![](images/8b644b76187ca40820b4d0310eeca3a8a74118f248c5eab10b3dc3316d3bdd02.jpg)

![](images/707053fdd7c511e4dae86189152a9f568239d545b75fac1df3d40b0dcfd37403.jpg)
Figure 2 Accuracy and response length during RL training. PPO w/o data filtering is the RL training with all problems in the dataset. Left: the accuracy on AIME2024 over the training steps. Right: the response length on the training dataset over the training steps.

We further look into cases to understand what Heimdall has learned during the training. Table 2 shows the verification of a correct solution to a hard problem in AIME2025. Due to space limitations, we only highlight some key points. We can observe two types of checking:

- Forward checking. It checks if the reasoning chain in every step of the solution is correct, which is generally applicable to all problems.
- Backward checking. It checks whether a conclusion, be it intermediate or final, fits the known constraints. For some types of problems, like solving equations and finding the general term formula of a sequence, backward checking is efficient and easy to implement.

The case exemplifies the common task of deriving a general formula for a sequence. As illustrated, Heimdall applies both methods of validation to confirm the correctness of the solution.

Next, we investigate how the verification ability scales as the number of verifications increases. We sample 64 solutions for each problem with the solver model and 64 verifications for each solution, resulting in a total of $30 \times 64 \times 64$ responses on either the AIME2024 or the AIME2025 dataset. Denoting the number of verifications of each solution as $N$, we randomly select $N$ verifications for each solution from the data collected above and determine the final score by some aggregation operation, e.g., Majority Voting or averaging. We repeat the process 2048 times to eliminate fluctuations in the statistics. Taking Majority Voting as the aggregation operation, we compute the accuracy, the false positive rate and the false negative rate at every compute budget $N$, as is shown in the top of Figure 3. In addition, we take the average of the $N$ scores, a decimal number in [0, 1], as the final score, and draw the curve of the AUC score in the bottom-left of Figure 3. It shows that Heimdall's performance can be significantly improved by simply sampling more verification trajectories. As $N$ grows larger, the performance gradually converges to an upper limit. For example, the accuracy converges to about $97.5\%$ on AIME2024 and $96.0\%$ on AIME2025, and the remaining failure cases reflect biases inherent in the model that cannot be eliminated by adding more compute budget. We further analyze the distribution of those failure cases. For each problem, we calculate two statistics: one is the difficulty of solving it, estimated by the pass rate over its 64 solutions, and the other is the difficulty of verifying its solutions, estimated by the total number of verification failures on its solutions. Taking the two values as the x-axis and the y-axis respectively, we visualize their correlation in the bottom-right of Figure 3. We observe that the difficulty of a problem does not necessarily correlate with the difficulty of verifying its solutions. For example, Heimdall fails 17/64 times on a problem whose pass rate is $67\%$.

In addition, a clear observation is that the performance of Heimdall on AIME2025 is generally worse than that on AIME2024. We believe the main reason is that verifying solutions on AIME2025 is harder than on AIME2024. One piece of evidence is that o1-mini achieves $80.9\%$ on AIME2024 and $75.3\%$ on AIME2025, a larger degradation than that of our model.

# 4.3 Scaling of problem solving with verification

In terms of problem solving, repeated sampling [1] is known to scale with inference-time compute. We evaluate multiple scaling algorithms, both with and without the verifier.
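Both the verification-scaling analysis above and the problem-solving evaluation below estimate accuracy at a given budget by repeatedly subsampling from a fixed pool of pre-generated responses. A minimal sketch of this Monte-Carlo protocol is given below; the data layout and names are illustrative assumptions rather than the paper's code.

```python
import random

def estimate_selection_accuracy(pool, select, n, m, repeats=2048, seed=0):
    """Estimate accuracy at budget (n solutions, m verifications per solution).

    pool:   list of problems; each problem is a list of
            (answer, is_correct, verdicts) tuples for its pre-sampled solutions.
    select: a selection rule such as pessimistic_verification(answers, verdicts).
    """
    rng = random.Random(seed)
    hits = 0
    for _ in range(repeats):
        for problem in pool:
            subset = rng.sample(problem, n)                       # n of the sampled solutions
            answers = [ans for ans, _, _ in subset]
            verdicts = [rng.sample(v, m) for _, _, v in subset]   # m verifications each
            chosen = select(answers, verdicts)
            hits += any(ok for ans, ok, _ in subset if ans == chosen)
    return hits / (repeats * len(pool))
```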
We analyze the inference + +![](images/fe886d189f024c5120df42a013683bee8b158b3fc37ba3ee60cf7194f46f1c8d.jpg) + +![](images/6f484e4be0cf4cc54219a65ea52ca98d6d801ffe275f018bf2ff3e34d42dd81f.jpg) + +![](images/695975f877c08159c982c176f2bf6d3900e61dc49ccd1d75fb0027e1967b18eb.jpg) +Figure 3 The inference-time scaling of verification ability on problem solutions in AIME2024 and AIME2025. Top-left: We show the accuracy of Heimdall when we sample multiple verification responses and make the judgment by majority voting. Top-right: We show the decreasing false-negative rate(FNR) and false-positive rate(FPR) as we scale up verification responses with majority voting. Bottom-left: We calculate the average score of verification responses and draw the AUC along each number of responses. Bottom-right: We collect the verification failure cases on every math problem and draw the relation between the difficulty of the problem and the number of verification failures, which reveals that the verification difficulty may not necessarily correlate with the difficulty of the original problem. + +![](images/88fa108dfa2c896417548184733701d2de540dc592cd6d34e1de0766047ec256.jpg) + +··· + +**Problem** + +Let the sequence of rationals $x_{1}, x_{2}, \ldots$ be defined such that $x_{1} = \frac{25}{11}$ and + +$$ +x _ {k + 1} = \frac {1}{3} \left(x _ {k} + \frac {1}{x _ {k}} - 1\right). +$$ + +$x_{2025}$ can be expressed as $\frac{m}{n}$ for relatively prime positive integers $m$ and $n$ . Find the remainder when $m + n$ is divided by 1000. + +**Solution** + +··· + +**Verification** + + + +Okay, so I need to figure out if the given solution to this problem is correct. Let me read through it again to make sure I understand. + +The problem is: ... + +The solution provided has four key steps. Let me go through each step and verify. + +Step 1: They let $x_{k} = a_{k} / b_{k}$ with $a_{k}$ and $b_{k}$ coprime. Then they write the recurrence relations as $a_{k + 1} = a_k^2 +b_k^2 -a_kb_k$ and $b_{k + 1} = 3a_kb_k$ . + +Wait, let's verify that. Starting from $x_{k + 1} = (1 / 3)(x_k + 1 / x_k - 1)$ . If $x_{k} = a_{k} / b_{k}$ , then $1 / x_{k} = b_{k} / a_{k}$ So $x_{k} + 1 / x_{k} = (a_{k}^{2} + b_{k}^{2}) / (a_{k}b_{k})$ . Okay, so step 1 seems correct. [Forward checking] + +··· + +Wait let me confirm with $S_{3}$ : $a_{3} = a_{2}^{2} + b_{2}^{2} - a_{2}b_{2}$ . Wait $a_{2} = 471$ , $b_{2} = 825$ . So $a_{3} = 471^{2} + 825^{2} - 471*825$ . [Backward checking] + + + +The solution correctly arrives at the conclusion despite some initial confusion in their explanation. They properly computed each part modulo 1000, leading to the correct final result. + +Answer: 1 + +Table 2 A case of verification on hard problems. The verification process contains both forward checking, i.e., checking if each step is correct and backward checking, i.e., checking if a conclusion matches constraints, as is shown in the position of the red texts. + +![](images/a6ceaaf54bd7ad5fb8518d4d94c967fe0a5ccc33696d9d663f524fd60bc4d369.jpg) +Figure 4 The inference-time scaling of problem solving with Heimdall. The two figures show the accuracy on AIME datasets as the number of solutions scales up. Left: the problem solving accuracy on AIME2025 dataset scales with the number of solutions. The colored shaded area represents the area covered by the accuracy curves of a selection algorithm as the number of verifications increases from 1 to 64. 
Right: the contour map of the accuracy of Pessimistic Verification as the number of solutions (x-axis) and the number of verifications (y-axis) increase. The red curve indicates the optimal configurations within various overall compute budgets.

![](images/be88d7c5a6c41fb2c49cdc172f640ecb7aa221ceb85fa1108b2a5cc20ca5ec0d.jpg)

time scaling in two dimensions, i.e., the number of solutions $N$ and the number of verifications on each solution $M$. Considering the huge computational cost, we only evaluate on AIME2025, with $N \in [2, 256]$ and $M \in [1, 64]$. Similar to the analysis of verification accuracy, we first sample $N = 256$ solutions for each problem and $M = 64$ verifications for each solution as the complete data, and then randomly select a subset to evaluate each scaling algorithm under a specific compute budget $(M, N)$. We repeat the sampling 2048 times to get a stable average score. We set $\alpha = 0.1$ in Pessimistic Verification and also evaluate the other three selection algorithms as follows.

Majority Voting. Majority voting is one of the most commonly used inference-time scaling methods. It first categorizes the solutions, e.g., by the final answers for math problems, and simply selects the category that contains the largest number of solutions. As mentioned before, majority voting can be seen as a special case of Pessimistic Verification, where $\alpha$ is large enough to overshadow the verification signal.

Shortest Majority Voting. The recent work [31] observes a length bias: correct solutions are often shorter than incorrect ones for the same question. Suppose the answer $a_i$ occurs $c_i$ times in the sampled responses and the average length of the responses with answer $a_i$ is $l_i$; then the voting score for $a_i$ is

$$
s_i = \frac{c_i}{l_i}
$$

Sampling-based Search. The work [33] leverages a commercial LLM as the verifier, and scales inference-time computation along both the number of sampled solutions and the number of verifications. During the selection, it calculates the average verification score of each solution and selects the solution with the largest score. Note that it does not group the solutions based on their answers, which is different from the special case of Pessimistic Verification where $\alpha$ equals zero.

Tie-breaking rules. The selection algorithm may encounter a tie, where multiple options have the same score. In principle, one can introduce another model to compare which option is better, but for simplicity, we leverage the length prior to break the tie, namely, selecting the option with the shortest average solution length.
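For reference, the scoring rules of the two baselines above can be sketched as follows (our own illustration; tie-breaking by shortest average length is omitted for brevity).

```python
from collections import defaultdict

def shortest_majority_voting(answers, lengths):
    """Score each answer by s_i = c_i / l_i, where l_i is the average response length."""
    count, total_len = defaultdict(int), defaultdict(int)
    for ans, length in zip(answers, lengths):
        count[ans] += 1
        total_len[ans] += length

    def score(ans):
        avg_len = total_len[ans] / count[ans]
        return count[ans] / avg_len

    return max(count, key=score)

def sampling_based_search(verdicts):
    """Return the index of the single solution with the highest mean verification
    score, without grouping solutions by their answers."""
    means = [sum(v) / len(v) for v in verdicts]
    return max(range(len(verdicts)), key=means.__getitem__)
```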
The left of Figure 4 shows how the accuracy of different scaling algorithms changes with the number of solutions $N$. Majority Voting is the worst among all scaling algorithms under the same $N$. By employing the length prior, Shortest Majority Voting gives better accuracy when $N$ is small, but finally converges to $70\%$, the same as Majority Voting. The red and blue areas represent the areas covered by the group of accuracy curves with $M \in [1,64]$ of Sampling-based Search and Pessimistic Verification, respectively. As the figure shows, with the help of verification, the accuracy is significantly improved. In addition, Pessimistic Verification is better than Sampling-based Search when $M$ is small. The reason is that the verification process is inherently probabilistic. Even for an easy task, it is still possible that a wrong solution is judged as correct and is ultimately selected as the final solution. With the second term in Equation 1, Pessimistic Verification penalizes such uncertain cases and favors those with more visits. As $M$ grows larger, the verification scores stabilize, the second term becomes smaller, and the gap between the two algorithms shrinks. Interestingly, when $N$ is large, the gap is large again. By checking typical cases, we find that it comes from the grouping of solutions. Pessimistic Verification aggregates the solutions by their answers, while Sampling-based Search treats each solution independently. Two solutions may have the same final answer, but their approaches or expressions can be entirely different. The aggregation takes this variance into consideration and thus makes a more robust selection. Note that such grouping is not generally applicable, e.g., grouping the solutions of proof problems is not straightforward. In those cases, we expect the two algorithms to converge to the same limit.

Taking the accuracy of Pessimistic Verification as a function of $M$ and $N$, we draw the contour map in the right of Figure 4. $M = 0$ represents vanilla Majority Voting without verifications. We can see that increasing either $N$ or $M$ improves the performance. Each point $(N, M)$ involves $N$ responses by the solver model and $N \times M$ responses by the verifier, which is $N \times (M + 1)$ responses in total. By minimizing the overall budget, we derive the compute-optimal configurations for different compute budgets, with the constraint that $M$ and $N$ are non-negative integers. As is shown in the figure, we need to alternately increase $N$ and $M$, but we should increase $N$ more frequently. The reason is that the correct answer to a hard problem is sparse. To get the problem solved with a scaling algorithm, we first need to give a sufficient budget to obtain the correct answer.
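A compute-optimal configuration can thus be read off the measured accuracy grid by restricting to points whose total cost $N \times (M + 1)$ fits a given budget, as in the hedged sketch below; the accuracy lookup is assumed to come from the estimation procedure sketched earlier.

```python
def compute_optimal_configs(accuracy, budgets, n_max=256, m_max=64):
    """For each total response budget, pick the (n, m) grid point with the highest
    estimated accuracy among configurations costing n * (m + 1) responses.
    accuracy(n, m) returns the measured accuracy at that grid point;
    m = 0 corresponds to plain Majority Voting."""
    best = {}
    for budget in budgets:
        candidates = [(n, m)
                      for n in range(1, n_max + 1)
                      for m in range(0, m_max + 1)
                      if n * (m + 1) <= budget]
        best[budget] = max(candidates, key=lambda nm: accuracy(*nm))
    return best
```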
Remaining room for improvement. Can Heimdall be better? The black dashed curve in the left of Figure 4 is Best-of-N, which selects the response whose answer equals the ground-truth answer and is the upper limit of any scaling algorithm. When $N$ is small, Pessimistic Verification performs near the upper limit, but the gap widens as $N$ increases. Consider the configuration $N = 256$, $M = 64$: Pessimistic Verification gets a score of $83.3\%$ and the upper limit is $93.3\%$, so the gap is 3 problems. Looking into the individual problems, we find that there are 4 problems that have only one correct solution among the 256 solutions. Heimdall manages to identify the correct solution on one of them, which is the case shown in Table 2, but fails on the other three. The three failed problems involve spatial reasoning, which the base model of Heimdall is not very skilled at. We believe that as the ability of the base model becomes better, the verification ability can reach the upper limit.

Coordination with other solver models. In the previous experiments, we use DeepSeek-R1-Distill-Qwen-32B as the solver model, which is the model used to collect data during the training phase. To test Heimdall's generalization to other solver models, we test on DeepSeek-R1 and Gemini 2.5 Pro. For DeepSeek-R1, we directly extract the summary in its response, while for Gemini 2.5 Pro, we leverage another LLM to summarize its solution because we observe that its responses contain many reflections, which might confuse the verification process. Due to the limited compute budget, we set the total number of verifications to 16 and the total number of solving attempts to 64. We find that the verification accuracy on DeepSeek-R1 and Gemini 2.5 Pro is $90.1\%$ and $89.9\%$, respectively, close to that on DeepSeek-R1-Distill-Qwen-32B. The scaling in problem solving, as is shown in the right of Figure 1, indicates that Heimdall consistently improves the accuracy with various solver models and that using stronger solver models results in higher accuracy. Meanwhile, the improvement for Gemini 2.5 Pro is smaller compared to those of the other models. The reason is that its accuracy is already high on AIME2025 and the base model of Heimdall inherently lacks some ability on certain problems.

# 5 Generalization to math proof problems

Heimdall is trained on math problems with explicit answers because it is easy to determine the correctness of the solution. However, there are many other types of problems, some of which might be more challenging.

Here is a math problem and a solution of it. Think step by step and verify if each proof step in solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the solution is correct and 0 if incorrect.

**Problem**

The n players of a hockey team gather to select their team captain. Initially, they stand in a circle, and each person votes for the person on their left.

The players will update their votes via a series of rounds. In one round, each player updates their vote, one at a time, according to the following procedure: At the time of the update, if a is voting for b, and b is voting for c, then a updates their vote to c. (Note that a, b, and c need not be distinct; if $\mathrm{b} = \mathrm{c}$, then a's vote does not change for this update.) Every player updates their vote exactly once in each round, in an order determined by the players (possibly different across different rounds). They repeat this updating procedure for n rounds. Prove that at this time, all n players will unanimously vote for the same person.

**Solution** ...

**Verification**

Wait, but since the solution didn't present this reasoning, their proof is flawed. They presented an incorrect model of the process, leading to an overestimation of the rounds needed, but the conclusion is still correct. However, the problem is to verify if each step in their solution is correct. Since their step 4's reasoning is wrong, their proof is incorrect.

The solution's step 4 incorrectly assumes that each person's vote jumps $n$ times, leading to covering the entire ring. The correct approach is to note that each round reduces the number of distinct targets by one, leading to convergence in $n-1$ rounds. Thus, while the conclusion is correct, the reasoning in step 4 is flawed.

Answer: 0

Table 3 A case of verification on math proof problems. The problem is P1 in the Canadian Mathematical Olympiad 2025. We modify the prompt template to check the proof process rather than the final answer, which is the red text. Heimdall checks the proof step by step and finds that the solution lacks a rigorous proof in step 4.
In this section, we test Heimdall's capability in verifying mathematical proof problems. We select 10 proof problems from Mathematical Olympiads of different countries from the years 2024 and 2025, and leverage a solver model, i.e., DeepSeek-R1-Distill-Qwen-32B, to generate a proof for each problem. Considering that the solver model is not good at spatial reasoning, we do not select geometry-related problems. Heimdall is then employed to check the correctness of each proof. Finally, we have experts evaluate both the proofs and Heimdall's verifications. The solver model correctly solves 2 problems, while the remaining 8 are incorrect. To our surprise, Heimdall correctly judges 9/10 cases, identifying 2/2 correct proofs and detecting issues in 7/8 incorrect proofs. There is 1 problem where Heimdall fails to identify the error in the proof, resulting in a false-negative judgment. Looking into the specific cases, we find that Heimdall judges the correctness with both forward and backward checking, e.g., checking each step and testing with examples. It is capable of identifying most errors or unproven assumptions in a solution, but for some subtle cases, e.g., when an assumption does not appear in the form of a proposition but is implicitly made during the proof process, Heimdall might fail. We believe that introducing proof data into the RL training would improve the performance of Heimdall, and an important direction in the future would be how to generate such a dataset at large scale.

# 6 Verification on automatic knowledge discovery

In the process of human exploration of the unknown, some scientists pose questions, some propose solutions to these questions, and others focus on verifying the correctness of solutions provided by their predecessors. Verification itself, as a crucial part of knowledge discovery, ensures the correctness of new knowledge. In this section, we design a prototype that simulates the stages of posing questions and solving them, using the synthesis of math problems by NuminaMath [12], to evaluate Heimdall's effectiveness in detecting problematic knowledge. NuminaMath open-sources a comprehensive collection of 860,000 pairs of math problems and reference solutions. It includes 229,982 MATH-level pairs and 62,108 AMC-AIME-level pairs that are synthesized from seed problems in the MATH and AMC-AIME training datasets. We test Heimdall on the harder one, i.e., the AMC-AIME dataset. Flaws can exist either within the problem itself, such as an unsolvable problem, or within the solution provided. In principle, by checking if the solution satisfies all the requirements in the problem, Heimdall can detect both kinds of flaws. Meanwhile, the task also indirectly tests the generalization capabilities of Heimdall, as the quality of the problems in the training set is much higher.

We randomly sample 8,192 pairs of questions and solutions as the test set. For each pair, we construct the prompt and query Heimdall 8 times. We calculate the sum of the verification scores, which ranges from 0 to 8, and illustrate the distribution in Figure 5. As shown, nearly half of the data is labeled incorrect with high confidence, which is consistent with the experience reported on the latest NuminaMath-1.5 page [13]: in an ablation study, the authors find that this dataset slightly hurts performance and plan to remove all synthetic data until they find a way to reliably generate high-quality synthetic problems.
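The screening procedure described above can be sketched as follows; `verify` stands for a single Heimdall query returning a 0/1 verdict, and both the helper names and the flagging threshold are our own illustrative choices.

```python
def screen_synthetic_pairs(pairs, verify, k=8, flag_below=2):
    """Query the verifier k times per (problem, solution) pair, sum the 0/1
    verdicts, and flag low-scoring pairs as likely flawed."""
    flagged, score_hist = [], [0] * (k + 1)
    for problem, solution in pairs:
        score = sum(verify(problem, solution) for _ in range(k))
        score_hist[score] += 1              # score distribution, as in Figure 5
        if score < flag_below:
            flagged.append((problem, solution, score))
    return flagged, score_hist
```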
To measure the verification correctness, we randomly select 10 cases in the 0-scored group and manually check their correctness. We find that for all cases, the judgments by Heimdall are correct. Table 4 is a case of the problematic data and the verification. Due to space limitations, we retain only the essential information, with the rest omitted. The question does not have the correct answer among its options. Meanwhile, the solution mistakenly + +![](images/2f73438f093255b5510d6470fa706f84aab4ade82b7a97970530daabc2a053ef.jpg) +Figure 5 The distribution of verification scores on the problems of a synthetic dataset. The x-axis is the sum of scores across 8 verifications and the y-axis is the number of problems corresponding to each sum. + +uses an incorrect value in the equation and interestingly arrives at one of the given options. We hypothesize + +# Prompt: + +Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. ... + +**Problem** + +A rabbit starts jumping from the point $(2,3)$ on a rectangular board that extends from $(0,0)$ to $(6,5)$ . Each jump it makes is either left, right, up, or down, and has a length of 1 unit. Each jump direction is uniformly and randomly chosen. The jumping sequence continues until the rabbit reaches any border of the rectangle. Calculate the probability that the rabbit's sequence of jumps ends on the top or bottom side of the rectangle. + +(A). $\frac{1}{4}$ (B). $\frac{17}{32}$ (C). $\frac{17}{24}$ (D). $\frac{1}{3}$ (E). $\frac{3}{4}$ + +**Solution** + +Let $P_{(x,y)}$ denote the probability of the rabbit ending on the top or bottom side starting from $(x,y)$ . Using symmetry about the center line $y = 2.5$ and $x = 3$ , and recursive relations, we analyze the movements. + +··· + +Solving Equations: We solve this using recursive approaches and considering border cases explicitly. Assuming symmetry simplifies $P_{(3,2)} = P_{(3,3)}$ . + +Here, let's consider only two key points $(2,3)$ and $(3,3)$ for simplicity. Solving: + +$$ +P _ {(2, 3)} = \frac {1}{4} P _ {(2, 2)} + \frac {1}{4} (1) + \frac {1}{4} P _ {(1, 3)} + \frac {1}{4} P _ {(3, 3)} +$$ + +·· + +Thus, the rabbit's sequence of jumps ends on the top or bottom side with a probability of $\frac{17}{24}$ . The final answer is (C) $\boxed{\frac{17}{24}}$ + +# Response: + + + +··· + + + +The solution provided incorrectly assumes that $\mathrm{P}(2,4) = 1$ , which is not true since (2,4) is not a top or bottom border. They oversimplified the equations by not accounting for the correct dependencies of other points like $\mathrm{P}(2,4)$ and mishandled the symmetry in y-direction due to different boundary conditions $(\mathrm{P} = 0$ at $\mathrm{y} = 0$ and $\mathrm{P} = 1$ at $\mathrm{y} = 5$ ). + +Answer: 0 + +Table 4 An example of problematic synthetic data and the verification. The verification correctly points out the problem in the solution. + +that the task is somewhat challenging for the LLM used for synthetic data generation, leading the LLM to hallucinate a superficially consistent but wrong output. + +# 7 Future Work + +Firstly, the verification dataset is formed by simply extracting the summary part of a reasoning model, which as we observed is sometimes overly brief, e.g., driving to an answer with only one sentence. A more detailed explanation would make the verification easier. One can further improve the verification accuracy by summarizing the reasoning process with another LLM. 
Secondly, we mainly evaluate the verification ability on math problems that have final answers. There are many other types of tasks, e.g., coding problems and mathematical proof problems. Although the learned ability is generalizable to other domains, we expect it to be beneficial to train with data from other domains. For example, in the context of coding problems, backward checking may take the alternative form of designing test cases. Lastly, we only prototype the usage of Heimdall in automatic knowledge discovery. In real scenarios, posing valuable questions is a challenging task that demands both curiosity and keen insight. Such ability is often a critical part of scientific discovery, yet it is seldom investigated. We believe that as the general capabilities of LLMs continue to advance, this direction will become more and more important.

# 8 Conclusion

In this paper, we propose to train a long CoT verifier called Heimdall with reinforcement learning. On competitive math problems, Heimdall achieves high accuracy and scales well along both the length of reasoning chains and the number of repeated generations. Through human evaluation, we find that Heimdall also shows impressive generalization ability on out-of-domain problems, such as math proofs. We further propose an inference-time scaling algorithm called Pessimistic Verification, which incorporates a solver and Heimdall for problem solving. By scaling up the compute, we can achieve performance comparable to top-tier models on challenging math problems. Lastly, we design a prototype of automatic knowledge discovery and demonstrate that Heimdall can reliably detect flaws in the synthetic data from another LLM.

# 9 Acknowledgments

We thank the data annotation team for their expertise in collecting the evaluation data and analyzing the verification outputs, including Bocheng Zhou, Weijian Zhao, Tong Sun and Zhiyuan Zhang.

# References

[1] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024.
[2] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025.
[3] Google. Gemini 2.5: Our most intelligent ai model, 2025. URL https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking. Accessed: 2025-03-25.
[4] Grok. Grok 3 beta — the age of reasoning agents, 2025. URL https://x.ai/news/grok-3. Accessed: 2025-02-19.
[5] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, et al. A survey on llm-as-a-judge. arXiv preprint arXiv:2411.15594, 2024.
[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.
[7] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.
[8] Minki Kang, Jongwon Jeong, and Jaewoong Cho.
T1: Tool-integrated self-verification for test-time compute scaling in small language models, 2025. URL https://arxiv.org/abs/2504.04718. +[9] Tian Lan, Wenwei Zhang, Chengqi Lyu, Shuaibin Li, Chen Xu, Heyan Huang, Dahua Lin, Xian-Ling Mao, and Kai Chen. Training language models to critique with multi-agent feedback. arXiv preprint arXiv:2410.15287, 2024. +[10] Tian Lan, Wenwei Zhang, Chen Xu, Heyan Huang, Dahua Lin, Kai Chen, and Xian-Ling Mao. Criticeval: Evaluating large-scale language model as critic. Advances in Neural Information Processing Systems, 37:66907-66960, 2024. +[11] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024. +[12] Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions. Hugging Face repository, 13:9, 2024. +[13] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-1.5](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024. +[14] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. Criticbench: Benchmarking llms for critique-correct reasoning. arXiv preprint arXiv:2402.14809, 2024. +[15] Liangchen Luo, Zi Lin, Yinxiao Liu, Lei Shu, Yun Zhu, Jingbo Shang, and Lei Meng. Critique ability of large language models. arXiv preprint arXiv:2310.04815, 2023. +[16] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. S²r: Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025. +[17] Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024. +[18] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. + +[19] Qian Pan, Zahra Ashktorab, Michael Desmond, Martin Santillan Cooper, James Johnson, Rahul Nair, Elizabeth Daly, and Werner Geyer. Human-centered design recommendations for lmm-as-a-judge. arXiv preprint arXiv:2407.03479, 2024. +[20] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +[21] Guijin Son, Hyunwoo Ko, Hoyoung Lee, Yewon Kim, and Seunghyeok Hong. Llm-as-a-judge & reward model: What they can and cannot do. arXiv preprint arXiv:2409.11239, 2024. +[22] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025. +[23] Rich Sutton. Verification, the key to ai. URL http://incompleteideas.net/IncIdeas/KeytoAI.html. 
+[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. +[25] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025. +[26] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025. +[27] Jiayi Ye, Yanbo Wang, Yue Huang, Dongping Chen, Qihui Zhang, Nuno Moniz, Tian Gao, Werner Geyer, Chao Huang, Pin-Yu Chen, et al. Justice or prejudice? quantifying biases in llm-as-a-judge. arXiv preprint arXiv:2410.02736, 2024. +[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. +[29] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024. +[30] Wojciech Zaremba, Evgenia Nitishinskaya, Boaz Barak, Stephanie Lin, Sam Toyer, Yaodong Yu, Rachel Dias, Eric Wallace, Kai Xiao, Johannes Heidecke, et al. Trading inference-time compute for adversarial robustness. arXiv preprint arXiv:2501.18841, 2025. +[31] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025. +[32] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024. +[33] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025. +[34] Jian Zhao, Runze Liu, Kaiyan Zhang, Zhimu Zhou, Junqi Gao, Dong Li, Jiafei Lyu, Zhouyi Qian, Biqing Qi, Xiu Li, et al. Genprm: Scaling test-time compute of process reward models via generative reasoning. arXiv preprint arXiv:2504.00891, 2025. +[35] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025. 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10337/images/0191c427ea854db6b0b3df2857a2ba0d5b3133bacabb267a56124a118ed19753.jpg b/data/2025/2504_10xxx/2504.10337/images/0191c427ea854db6b0b3df2857a2ba0d5b3133bacabb267a56124a118ed19753.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30cfc8fe5def4ef973066afae6bc05f1e1c8f909 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/0191c427ea854db6b0b3df2857a2ba0d5b3133bacabb267a56124a118ed19753.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4887150ae840d356409b0caa9a60ae1c349ae2e994bc9cefa134148700eb98cc +size 25529 diff --git a/data/2025/2504_10xxx/2504.10337/images/18a064ca0d0b73a8fb4229b47b7d2a7fe6336ab43b4b51442dac85fa57e6e4fd.jpg b/data/2025/2504_10xxx/2504.10337/images/18a064ca0d0b73a8fb4229b47b7d2a7fe6336ab43b4b51442dac85fa57e6e4fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c37d4cc2e1225bd3f29cd248d99fb137d826424 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/18a064ca0d0b73a8fb4229b47b7d2a7fe6336ab43b4b51442dac85fa57e6e4fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e205ff6c4505393b224f82aed9cb4fba3f04fb08dcf02d797754746cf2d6380 +size 5988 diff --git a/data/2025/2504_10xxx/2504.10337/images/2f73438f093255b5510d6470fa706f84aab4ade82b7a97970530daabc2a053ef.jpg b/data/2025/2504_10xxx/2504.10337/images/2f73438f093255b5510d6470fa706f84aab4ade82b7a97970530daabc2a053ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e84c0a8c555e58c72704c33a3b848e1260c98be --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/2f73438f093255b5510d6470fa706f84aab4ade82b7a97970530daabc2a053ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8a827fd1dbedeaf948865e97377cfa7d3390895912442bce1b5e08faa734569 +size 23345 diff --git a/data/2025/2504_10xxx/2504.10337/images/308ded485fd606aed1a42b50ec1801fa34711d88d5f660645d0d588ba71d7474.jpg b/data/2025/2504_10xxx/2504.10337/images/308ded485fd606aed1a42b50ec1801fa34711d88d5f660645d0d588ba71d7474.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b489632e52a313ba09087c88fb6e12239583e3d0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/308ded485fd606aed1a42b50ec1801fa34711d88d5f660645d0d588ba71d7474.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d0aa6b6c31e031c9331b1ef5e11f7e301dc2f0ba3951a1be8e6cc29bf4c3ed1 +size 7429 diff --git a/data/2025/2504_10xxx/2504.10337/images/30d330db2d527a1af853e6e69d2372cf6bfbece2f70a00fe911ae1cecaac7242.jpg b/data/2025/2504_10xxx/2504.10337/images/30d330db2d527a1af853e6e69d2372cf6bfbece2f70a00fe911ae1cecaac7242.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d6a10cdfba4816a3b1ea957bd44795fe67f00e2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/30d330db2d527a1af853e6e69d2372cf6bfbece2f70a00fe911ae1cecaac7242.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10a33f58ede60142a8450ff786046521b70c2da9d9402e04a611e04fa7fcfdf4 +size 4114 diff --git a/data/2025/2504_10xxx/2504.10337/images/50ba8d1f5ebc6b794706a38186df025e695a0e378c116cc3e91b5a1a9fbd269e.jpg b/data/2025/2504_10xxx/2504.10337/images/50ba8d1f5ebc6b794706a38186df025e695a0e378c116cc3e91b5a1a9fbd269e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab64f72019ddd5a79ae25af8da841c75997e7d23 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10337/images/50ba8d1f5ebc6b794706a38186df025e695a0e378c116cc3e91b5a1a9fbd269e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86b8ab69e36492e6216431f0e007072c7faee3b7f98a0a52f130531c6d316342 +size 32451 diff --git a/data/2025/2504_10xxx/2504.10337/images/695975f877c08159c982c176f2bf6d3900e61dc49ccd1d75fb0027e1967b18eb.jpg b/data/2025/2504_10xxx/2504.10337/images/695975f877c08159c982c176f2bf6d3900e61dc49ccd1d75fb0027e1967b18eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e2decdc1a6edf5fc4321f23c36ca6ea255f34d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/695975f877c08159c982c176f2bf6d3900e61dc49ccd1d75fb0027e1967b18eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3776ec959c329ef970e783658b56c681b44576e691c9152c6507ad945c3907f +size 33006 diff --git a/data/2025/2504_10xxx/2504.10337/images/69acd5d7fcf2b3e74e76843cb7a1a4b47c6975f3bcd6412a620278d4168a9258.jpg b/data/2025/2504_10xxx/2504.10337/images/69acd5d7fcf2b3e74e76843cb7a1a4b47c6975f3bcd6412a620278d4168a9258.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f556ef18fc29a164e1b494c521f698d42ab3a62 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/69acd5d7fcf2b3e74e76843cb7a1a4b47c6975f3bcd6412a620278d4168a9258.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:941e6a1fe07015019de9b25a0145a83aa6dd1b7c2dd206b4c0a02779619d886f +size 3817 diff --git a/data/2025/2504_10xxx/2504.10337/images/6f484e4be0cf4cc54219a65ea52ca98d6d801ffe275f018bf2ff3e34d42dd81f.jpg b/data/2025/2504_10xxx/2504.10337/images/6f484e4be0cf4cc54219a65ea52ca98d6d801ffe275f018bf2ff3e34d42dd81f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f2b91ab392de3f2985f7d5c061e3191bd386eb8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/6f484e4be0cf4cc54219a65ea52ca98d6d801ffe275f018bf2ff3e34d42dd81f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b0c8b93f3b74891e811410fc94d0e8db2181a7ba0d5968203ec9074faa175ad +size 38057 diff --git a/data/2025/2504_10xxx/2504.10337/images/707053fdd7c511e4dae86189152a9f568239d545b75fac1df3d40b0dcfd37403.jpg b/data/2025/2504_10xxx/2504.10337/images/707053fdd7c511e4dae86189152a9f568239d545b75fac1df3d40b0dcfd37403.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccfea91389659b3f4b78315769dcac4dcb94cc5f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/707053fdd7c511e4dae86189152a9f568239d545b75fac1df3d40b0dcfd37403.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f5946f8c8aa919c647a19da39fb91b7630ab0620b044b69a8978af5a9bc5bcf +size 37954 diff --git a/data/2025/2504_10xxx/2504.10337/images/75405d92afda4e012384671ae4988f611e859df9081c367e30324e7241d5f375.jpg b/data/2025/2504_10xxx/2504.10337/images/75405d92afda4e012384671ae4988f611e859df9081c367e30324e7241d5f375.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cd8f882e8c3a2bd2cb032eafcedc58a610fd0a2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/75405d92afda4e012384671ae4988f611e859df9081c367e30324e7241d5f375.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c2baf9dfb3c985ec4e039276c171eb6284b71862aa6642a213230f7e9fb2f82 +size 4947 diff --git a/data/2025/2504_10xxx/2504.10337/images/85f80f96d11583bcc91e763e2173e36cf0836b619f69f65fa73aa939cc41a9b8.jpg 
b/data/2025/2504_10xxx/2504.10337/images/85f80f96d11583bcc91e763e2173e36cf0836b619f69f65fa73aa939cc41a9b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e74717e04777e196bf9c34b4472f71ca7405825 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/85f80f96d11583bcc91e763e2173e36cf0836b619f69f65fa73aa939cc41a9b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a784ffc9e82e9486c32512f4181861eb0ca44f3486678461603b949f27b2fe29 +size 22820 diff --git a/data/2025/2504_10xxx/2504.10337/images/88fa108dfa2c896417548184733701d2de540dc592cd6d34e1de0766047ec256.jpg b/data/2025/2504_10xxx/2504.10337/images/88fa108dfa2c896417548184733701d2de540dc592cd6d34e1de0766047ec256.jpg new file mode 100644 index 0000000000000000000000000000000000000000..634be71fc2fd3c768a9518aaed9be19f96c12d05 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/88fa108dfa2c896417548184733701d2de540dc592cd6d34e1de0766047ec256.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:920c828dcf9d57fd9e122c275cbbbefd830ff012c3e98fb9dac732f73aa3ad72 +size 30245 diff --git a/data/2025/2504_10xxx/2504.10337/images/8b644b76187ca40820b4d0310eeca3a8a74118f248c5eab10b3dc3316d3bdd02.jpg b/data/2025/2504_10xxx/2504.10337/images/8b644b76187ca40820b4d0310eeca3a8a74118f248c5eab10b3dc3316d3bdd02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e398419055a4ad5cda573d55ddfd8d016357b69d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/8b644b76187ca40820b4d0310eeca3a8a74118f248c5eab10b3dc3316d3bdd02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb57130e76b324e32c792f0fc235b0ea1e7364710059dc26b83e1fae9277bb7e +size 33552 diff --git a/data/2025/2504_10xxx/2504.10337/images/a476a87e3ba114e924d43e8cd89486842f35e498c586712ba4ffa2219ac91973.jpg b/data/2025/2504_10xxx/2504.10337/images/a476a87e3ba114e924d43e8cd89486842f35e498c586712ba4ffa2219ac91973.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adf0f9775ebb2322b3fe0128f951479baf79aeb9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/a476a87e3ba114e924d43e8cd89486842f35e498c586712ba4ffa2219ac91973.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4526091d8972eab184ba6f641a5cda92aeb6d1adc73f1a7fa9c9835e27ddff2 +size 1593 diff --git a/data/2025/2504_10xxx/2504.10337/images/a6ceaaf54bd7ad5fb8518d4d94c967fe0a5ccc33696d9d663f524fd60bc4d369.jpg b/data/2025/2504_10xxx/2504.10337/images/a6ceaaf54bd7ad5fb8518d4d94c967fe0a5ccc33696d9d663f524fd60bc4d369.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fcf33c2060997bfb91bcc8de2b6b8a4a90d7b465 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/a6ceaaf54bd7ad5fb8518d4d94c967fe0a5ccc33696d9d663f524fd60bc4d369.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a51a941a961f4d4893c9725cb58aee6b506637e4fcd39b878ab9a549e1d17985 +size 43117 diff --git a/data/2025/2504_10xxx/2504.10337/images/be88d7c5a6c41fb2c49cdc172f640ecb7aa221ceb85fa1108b2a5cc20ca5ec0d.jpg b/data/2025/2504_10xxx/2504.10337/images/be88d7c5a6c41fb2c49cdc172f640ecb7aa221ceb85fa1108b2a5cc20ca5ec0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..044d32d1a149ecaab5ba431ff2ef644969e23b18 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/be88d7c5a6c41fb2c49cdc172f640ecb7aa221ceb85fa1108b2a5cc20ca5ec0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:aaa16e8509cb3b91151c00ccf5f8c768179da5e180c99904288db8a927422ce8 +size 60292 diff --git a/data/2025/2504_10xxx/2504.10337/images/fe886d189f024c5120df42a013683bee8b158b3fc37ba3ee60cf7194f46f1c8d.jpg b/data/2025/2504_10xxx/2504.10337/images/fe886d189f024c5120df42a013683bee8b158b3fc37ba3ee60cf7194f46f1c8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..810d5b41a495fdbcb3f3b9a5e038c83440b64445 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/images/fe886d189f024c5120df42a013683bee8b158b3fc37ba3ee60cf7194f46f1c8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fd7c1c999d9afc3641594072482b24ebad7cc2e12953eed7bb0919ee6ef234b +size 31313 diff --git a/data/2025/2504_10xxx/2504.10337/layout.json b/data/2025/2504_10xxx/2504.10337/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c2633986a4ede4cb34df5e6b28e39a38b51a004e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10337/layout.json @@ -0,0 +1,9932 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 68, + 101, + 542, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 101, + 542, + 123 + ], + "spans": [ + { + "bbox": [ + 68, + 101, + 542, + 123 + ], + "type": "text", + "content": "Heimdall: test-time scaling on the generative verification" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 244, + 153, + 365, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 153, + 365, + 168 + ], + "spans": [ + { + "bbox": [ + 244, + 153, + 365, + 168 + ], + "type": "text", + "content": "Wenlei Shi, Xing Jin" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 263, + 177, + 348, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 177, + 348, + 190 + ], + "spans": [ + { + "bbox": [ + 263, + 177, + 348, + 190 + ], + "type": "text", + "content": "ByteDance Seed" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 277, + 232, + 334, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 232, + 334, + 246 + ], + "spans": [ + { + "bbox": [ + 277, + 232, + 334, + 246 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "spans": [ + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": "An AI system can create and maintain knowledge only to the extent that it can verify that knowledge itself [23]. Recent work on long Chain-of-Thought reasoning has demonstrated great potential of LLMs on solving competitive problems, but their verification ability remains to be weak and not sufficiently investigated. In this paper, we propose Heimdall, the long CoT verification LLM that can accurately judge the correctness of solutions. With pure reinforcement learning, we boost the verification accuracy from " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "62.5\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "94.5\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": " on competitive math problems. 
By scaling with repeated sampling, the accuracy further increases to " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "97.5\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": ". Through human evaluation, Heimdall demonstrates impressive generalization capabilities, successfully detecting most issues in challenging math proofs, the type of which is not included during training. Furthermore, we propose Pessimistic Verification to extend the functionality of Heimdall to scaling up the problem solving. It calls Heimdall to judge the solutions from a solver model and based on the pessimistic principle, selects the most likely correct solution with the least uncertainty. Taking DeepSeek-R1-Distill-Qwen-32B as the solver model, Pessimistic Verification improves the solution accuracy on AIME2025 from " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "54.2\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "70.0\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "16\\times" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": " compute budget and to " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "83.3\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": " with more compute budget. With the stronger solver Gemini 2.5 Pro, the score reaches " + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "inline_equation", + "content": "93.0\\%" + }, + { + "bbox": [ + 91, + 255, + 518, + 494 + ], + "type": "text", + "content": ". Finally, we prototype an automatic knowledge discovery system, a ternary system where one poses questions, another provides solutions, and the third verifies the solutions. Using the data synthesis work NuminaMath [13] for the first two components, Heimdall effectively identifies problematic records within the dataset and reveals that nearly half of the data is flawed, which interestingly aligns with the recent ablation studies from NuminaMath." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 92, + 502, + 178, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 502, + 178, + 514 + ], + "spans": [ + { + "bbox": [ + 92, + 502, + 178, + 514 + ], + "type": "text", + "content": "Date: April 17, 2025" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 92, + 514, + 501, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 514, + 501, + 526 + ], + "spans": [ + { + "bbox": [ + 92, + 514, + 501, + 526 + ], + "type": "text", + "content": "Correspondence: Wenlei Shi at wenlei.shi@bytedance.com, Xing Jin at jinxing.9@bytedance.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 573, + 162, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 573, + 162, + 586 + ], + "spans": [ + { + "bbox": [ + 67, + 573, + 162, + 586 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 597, + 543, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 597, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 66, + 597, + 543, + 717 + ], + "type": "text", + "content": "In the realm of scientific and mathematical discovery, the process of logistic verification and validation is as crucial as the initial act of problem-solving. One of the most illustrative examples of this principle can be found in the famous thought experiment 'chasing a beam of light' by Albert Einstein, where he found the paradox within the established physics theories and further formulated the principle of the constancy of the speed of light, a cornerstone of his Special Theory of Relativity. Recently, the problem solving ability of LLMs have been significantly improved. With the long Chain of Thought(CoT) reasoning, advanced LLMs are now able to effectively solve complex competition-level problems in both math and code domains. However, the verification ability of LLMs has not been sufficiently investigated. 
On one hand, although the intelligence of general purported LLM increases rapidly with the long CoT capabilities, we find that current SOTA models with direct prompting [5, 21] are not good at verifications on complex problems, e.g., o1-mini only achieves" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 50, + 223, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 50, + 223, + 69 + ], + "spans": [ + { + "bbox": [ + 67, + 50, + 223, + 69 + ], + "type": "text", + "content": "ByteDance | Seed" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10337v2 [cs.AI] 16 Apr 2025" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 99, + 225, + 244 + ], + "blocks": [ + { + "bbox": [ + 70, + 99, + 225, + 244 + ], + "lines": [ + { + "bbox": [ + 70, + 99, + 225, + 244 + ], + "spans": [ + { + "bbox": [ + 70, + 99, + 225, + 244 + ], + "type": "image", + "image_path": "0191c427ea854db6b0b3df2857a2ba0d5b3133bacabb267a56124a118ed19753.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 255, + 542, + 333 + ], + "lines": [ + { + "bbox": [ + 67, + 255, + 542, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 542, + 333 + ], + "type": "text", + "content": "Figure 1 Scaling of Heimdall. Left: the verification accuracy scales with the response length during RL training. With more reasoning tokens, Heimdall gives more accurate judgment on the solutions on AIME2024. Middle: the verification accuracy scales with repeated sampling and Majority Voting. By sampling multiple verification trajectories and voting, the accuracy can be further improved. Right: with Heimdall scoring the solutions on AIME2025, the problem solving accuracy scales with the number of solutions. We verify 16 times on each solution and select the most likely correct one with Pessimistic Verification " + }, + { + "bbox": [ + 67, + 255, + 542, + 333 + ], + "type": "inline_equation", + "content": "(\\times 16)" + }, + { + "bbox": [ + 67, + 255, + 542, + 333 + ], + "type": "text", + "content": ". When inter-playing with various solver models, Heimdall gives significant improvements over pure solver-based Majority Voting(MV)." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 227, + 99, + 383, + 245 + ], + "blocks": [ + { + "bbox": [ + 227, + 99, + 383, + 245 + ], + "lines": [ + { + "bbox": [ + 227, + 99, + 383, + 245 + ], + "spans": [ + { + "bbox": [ + 227, + 99, + 383, + 245 + ], + "type": "image", + "image_path": "85f80f96d11583bcc91e763e2173e36cf0836b619f69f65fa73aa939cc41a9b8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 383, + 99, + 539, + 244 + ], + "blocks": [ + { + "bbox": [ + 383, + 99, + 539, + 244 + ], + "lines": [ + { + "bbox": [ + 383, + 99, + 539, + 244 + ], + "spans": [ + { + "bbox": [ + 383, + 99, + 539, + 244 + ], + "type": "image", + "image_path": "50ba8d1f5ebc6b794706a38186df025e695a0e378c116cc3e91b5a1a9fbd269e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 354, + 541, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 541, + 390 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 541, + 390 + ], + "type": "text", + "content": "80.9% on our evaluation dataset. On the other hand, some work [16, 17, 22, 25, 29, 32, 35] trains a dedicated model for verification or critique but the high-quality verification data is hard to collect, which limits the verification capability and hence impedes the application to challenging problems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "text", + "content": "In this paper, we claim that verifying if a solution is correct is a special type of problem solving, i.e., a true/false question and involves step-by-step judgment on the solution. Inspired by the recent progress on the long CoT reasoning, we propose to train a long CoT verifier through reinforcement learning. We name it Heimdall, symbolizing its sharp ability to detect errors and safeguards the correctness of knowledge. We leverage PPO [20] algorithm and find that the data processing is critical to the RL training. Specifically, two types of problems hinder the optimization, i.e., easy problems with only correct solutions and hard problems with only wrong solutions, both of which lack contrastive examples and tends to guide the verifier to simply identify the hardness of a problem, rather than finding the wrong position in the solution. By filtering out the two cases, the model learns the verification ability more effectively. Taking the competitive math problems as our primary experimental domain, we show that the verification ability follows the test time scaling law where the accuracy improves significantly from " + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "inline_equation", + "content": "62.5\\%" + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "inline_equation", + "content": "94.5\\%" + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "text", + "content": " as the response length grows, as is shown the left of Figure 1. The performance can be further improved by sampling multiple verifications and voting on the judgment results. 
In the middle of Figure 1, the accuracy grows from " + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "inline_equation", + "content": "94.5\\%" + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "inline_equation", + "content": "97.5\\%" + }, + { + "bbox": [ + 67, + 396, + 541, + 588 + ], + "type": "text", + "content": " on AIME2024 as the number of verifications grows from 2 to 64. Furthermore, the evaluation from human experts shows that Heimdall generalizes well on math proof problems although it is trained with only the calculation problems with explicit answers." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 594, + 541, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 594, + 541, + 737 + ], + "spans": [ + { + "bbox": [ + 67, + 594, + 541, + 737 + ], + "type": "text", + "content": "In addition, we extend the usage of Heimdall to scale up the problem solving. Suppose the solver model gives multiple solutions for a problem and Heimdall judges the correctness of each solution for multiple times. We can select the best solution based on the verification results. We frame the selection process as a multi-arm bandit problem where solutions with the same conclusion are treated as multiple visits to the same 'arm'. Based on the pessimism principle, we propose the solution selection algorithm called Pessimistic Verification that minimizes the uncertainty of selecting wrong solutions. The algorithm unifies Majority Voting and reward model based Best-of-N by balancing the contributions of the solver and the verifier, and empirically demonstrates better scaling over both algorithms. Taking DeepSeek-R1-Distill-Qwen-32B [6] as the solver model, which scores 54 on AIME2025, Pessimistic Verification raises the scores to 70 with " + }, + { + "bbox": [ + 67, + 594, + 541, + 737 + ], + "type": "inline_equation", + "content": "\\times 16" + }, + { + "bbox": [ + 67, + 594, + 541, + 737 + ], + "type": "text", + "content": " compute, matching the performance of o1, and to 83.3 with more compute. We further test with stronger solver models, including DeepSeek-R1 [6] and Gemini 2.5 Pro[3]. As is shown in the right of Figure 1, Pessimistic Verification with Heimdall consistently improves the problem solving of various models and with Gemini 2.5 Pro, the" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 763, + 309, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 763, + 309, + 772 + ], + "spans": [ + { + "bbox": [ + 302, + 763, + 309, + 772 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 98, + 543, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 98, + 543, + 124 + ], + "spans": [ + { + "bbox": [ + 67, + 98, + 543, + 124 + ], + "type": "text", + "content": "accuracy on AIME2025 reaches " + }, + { + "bbox": [ + 67, + 98, + 543, + 124 + ], + "type": "inline_equation", + "content": "93\\%" + }, + { + "bbox": [ + 67, + 98, + 543, + 124 + ], + "type": "text", + "content": ", matching the currently reported SOTA with multiple attempts by Grok3 [4]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 128, + 543, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 128, + 543, + 201 + ], + "spans": [ + { + "bbox": [ + 67, + 128, + 543, + 201 + ], + "type": "text", + "content": "Finally, we create a prototype to demonstrate the utility of Heimdall on the automatic knowledge discovery. We use the work of math data synthesis called NuminaMath [12] as the procedure of automatically proposing new problems and the corresponding solutions, and call Heimdall to detect errors in the synthetic dataset. Human annotation demonstrates that Heimdall accurately identifies the errors in the dataset. The verification result also reveals that the quality of current synthetic dataset is poor, which is consistent with the authors' finding that removing the dataset from training improves the performance of the solver model [13]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 206, + 269, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 206, + 269, + 217 + ], + "spans": [ + { + "bbox": [ + 67, + 206, + 269, + 217 + ], + "type": "text", + "content": "In summary, our contributions are as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 224, + 541, + 342 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 82, + 224, + 541, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 224, + 541, + 259 + ], + "spans": [ + { + "bbox": [ + 82, + 224, + 541, + 259 + ], + "type": "text", + "content": "- We propose Heimdall, the long CoT verifier by reinforcement learning and demonstrate the superior accuracy than top-tier LLMs. Heimdall also shows good generalization ability on out-of-domain problems, such as math proof problems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 266, + 541, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 266, + 541, + 301 + ], + "spans": [ + { + "bbox": [ + 82, + 266, + 541, + 301 + ], + "type": "text", + "content": "- We propose a unified algorithm called Pessimistic Verification for inference time scaling on problem solving. Empirically, it scales better than the vanilla Majority Voting or the reward-model based Best-of-N and achieve SOTA accuracy on AIME2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 308, + 541, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 308, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 82, + 308, + 541, + 342 + ], + "type": "text", + "content": "- We create a prototype to show the utility of Heimdall in the autonomous knowledge discovery, where Heimdall is used to identify the correctness of the problem-solution pairs synthesized by another LLM. Human evaluations show that Heimdall can effectively detect the flaws in the synthetic data." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 356, + 171, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 356, + 171, + 369 + ], + "spans": [ + { + "bbox": [ + 67, + 356, + 171, + 369 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 380, + 543, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 543, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 543, + 464 + ], + "type": "text", + "content": "Reasoning model. 
Reasoning models outperform previous general-purpose models on challenging reasoning tasks. During the chain of thought(CoT) reasoning, they keep reflecting their claims and searching viable solutions, utilizing more compute budget and providing better and more robust results. OpenAI first released its reasoning models[2, 7, 30] that performs significantly better on competitive tests like AIME and CodeForces than its previous models. Work by DeepSeek[6] and Kimi[24] independently propose different ways of reinforcement learning to trigger the reflection and searching capability in their base models. Recently, Grok3 [4] and Gemini 2.5 Pro [3] also demonstrate their impressive reasoning capabilities through long CoT." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 469, + 543, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 469, + 543, + 626 + ], + "spans": [ + { + "bbox": [ + 67, + 469, + 543, + 626 + ], + "type": "text", + "content": "Generative evaluation. Recently some work are interested in improving the verification ability of the LLMs. Some [8, 29, 32, 34] explores finetuning an LLM with synthetic verification data to improve its verification ability. However, it is hard to synthesize high-quality data if the LLM inherently lacks the verification skills. One of the related topic is LLM-as-a-Judge [5, 11] where a LLM is prompted to evaluate responses from other LLMs. The work[11] leverages strong LLMs as judges to evaluate other models in various domains and reveals that strong LLM judges have good generalization ability in different domains. Some works design the judge system and analyze of the judgment behavior[19, 21, 27]. However, prompting is only effective on easy tasks, and when it comes to competitive tests, the general purported reasoning models performs not quite well, as is reveals from our test in Section 4. Another similar topic is critique [9, 10, 14, 15, 18], which often focuses on code and math problems and is used for giving suggestions for further revision. Critique fine-tuning [25] shows that fine-tuning on a high-quality critique data is beneficial to the reasoning ability of a base model. Several work, e.g., CTRL [26] leverages RL to train LLMs. However, they do not leverage the long CoT ability, which limits the verification performance on complex reasoning problems." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 637, + 149, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 637, + 149, + 651 + ], + "spans": [ + { + "bbox": [ + 67, + 637, + 149, + 651 + ], + "type": "text", + "content": "3 Approach" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 661, + 542, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 542, + 698 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 542, + 698 + ], + "type": "text", + "content": "We define the verification task where we ask a model to judge if a solution to a problem is correct in its CoT and finally put its judgment result at the end of the response. Table 1 is the template of the verification prompt and the expected format of a response." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 762, + 309, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 762, + 309, + 772 + ], + "spans": [ + { + "bbox": [ + 302, + 762, + 309, + 772 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 107, + 530, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 107, + 530, + 144 + ], + "spans": [ + { + "bbox": [ + 78, + 107, + 530, + 144 + ], + "type": "text", + "content": "Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the final answer in the solution is correct and 0 if incorrect." + } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 78, + 155, + 138, + 179 + ], + "blocks": [ + { + "bbox": [ + 78, + 155, + 138, + 179 + ], + "lines": [ + { + "bbox": [ + 78, + 155, + 138, + 179 + ], + "spans": [ + { + "bbox": [ + 78, + 155, + 138, + 179 + ], + "type": "text", + "content": "**Problem**\n${problem}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 79, + 190, + 137, + 216 + ], + "blocks": [ + { + "bbox": [ + 79, + 190, + 137, + 216 + ], + "lines": [ + { + "bbox": [ + 79, + 190, + 137, + 216 + ], + "spans": [ + { + "bbox": [ + 79, + 190, + 137, + 216 + ], + "type": "text", + "content": "**Solution**\n${solution}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 218, + 236, + 390, + 247 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 236, + 390, + 247 + ], + "spans": [ + { + "bbox": [ + 218, + 236, + 390, + 247 + ], + "type": "text", + "content": "Table 1 Prompt template for verification." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 67, + 268, + 307, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 268, + 307, + 282 + ], + "spans": [ + { + "bbox": [ + 67, + 268, + 307, + 282 + ], + "type": "text", + "content": "3.1 Reinforcement learning for verification" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": "RL Setup. 
Let " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{(p_i, s_i, y_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " be our dataset, where " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " is a problem, " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " is a solution to the problem, which may be the response from a reasoning model and " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "y_i \\in \\{0, 1\\}" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " represents the correctness of the solution, with 1 indicating correctness and 0 indicating incorrectness. Given a triplet " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "(p_i, s_i, y_i)" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": ", we prompt a LLM to check the correctness of the solution step-by-step and finally give a conclusion on the correctness, as is shown in Table 1. Denoting the prompt as " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "q_i" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": ", the verifier model " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "\\pi_\\theta(z_i, y_i'|q_i)" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " takes a prompt as input and generates the CoT " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "z_i" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " on judging the correctness of " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " and at last gives a boolean conclusion " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " is correct. The outcome reward function " + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 67, + 289, + 544, + 373 + ], + "type": "text", + "content": " is as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 249, + 380, + 360, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 380, + 360, + 412 + ], + "spans": [ + { + "bbox": [ + 249, + 380, + 360, + 412 + ], + "type": "interline_equation", + "content": "R (y, y ^ {\\prime}) = \\left\\{ \\begin{array}{l l} 1 & y = y ^ {\\prime}, \\\\ - 1 & y \\neq y ^ {\\prime}. 
\\end{array} \\right.", + "image_path": "30d330db2d527a1af853e6e69d2372cf6bfbece2f70a00fe911ae1cecaac7242.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 426, + 194, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 426, + 194, + 437 + ], + "spans": [ + { + "bbox": [ + 67, + 426, + 194, + 437 + ], + "type": "text", + "content": "Then the objective of RL is," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 223, + 455, + 386, + 470 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 455, + 386, + 470 + ], + "spans": [ + { + "bbox": [ + 223, + 455, + 386, + 470 + ], + "type": "interline_equation", + "content": "\\mathcal {J} (\\theta) = \\mathbf {E} _ {(q, y) \\sim \\mathcal {D}, (z, y ^ {\\prime}) \\sim \\pi_ {\\theta} (q)} \\left[ R (y, y ^ {\\prime}) \\right]", + "image_path": "75405d92afda4e012384671ae4988f611e859df9081c367e30324e7241d5f375.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 479, + 542, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 479, + 542, + 492 + ], + "spans": [ + { + "bbox": [ + 67, + 479, + 542, + 492 + ], + "type": "text", + "content": "We run the vanilla PPO algorithm on a reasoning model, and propose the following strategy for improvement." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 498, + 543, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 498, + 543, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 498, + 543, + 581 + ], + "type": "text", + "content": "Data collection and filtering. We collect the dataset " + }, + { + "bbox": [ + 67, + 498, + 543, + 581 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 67, + 498, + 543, + 581 + ], + "type": "text", + "content": " by prompting one or multiple reasoning models to solve problems. For every problem in the dataset, we collect multiple solutions and construct a verification prompt with each solution using the template in Table 1. However, two cases may hurt the RL training, i.e., the extremely difficult problem, which we fail to sample any correct solutions and the extremely easy problems, which we fail to sample any wrong solutions. Such unbalanced data may teach the verifier to be biased on the difficulty of the problem, i.e., be optimistic on easy problems and pessimistic on difficult problems. Therefore, we do not include the data of the two cases in the training dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 592, + 340, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 592, + 340, + 605 + ], + "spans": [ + { + "bbox": [ + 67, + 592, + 340, + 605 + ], + "type": "text", + "content": "3.2 Solution selection by Pessimistic Verification" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "text", + "content": "When tackling challenging problems, one can sample multiple solutions and leverage the verifier to identify the most likely correct one. By sampling verification responses multiple times, we can achieve more reliable judgments, thereby improving overall problem-solving performance. 
We propose a principled and flexible method for the inference time scaling along the two dimensions, i.e., the amount of solutions sampled from the solver model and the amount of verifications sampled from a verifier model. Denote the number of solutions to a problem as " + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "text", + "content": " and the number of verifications on each solution is " + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "text", + "content": ". We initially conceptualize the selection process as a multi-arm bandit problem, where each arm corresponds to a distinct answer, and each verification constitutes a visit to an arm. The reward is the verification result, which can be either 1 or 0. Each time the solver generates a solution, the arm representing the solution's answer receives " + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "text", + "content": " visits and immediately accrues " + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 611, + 542, + 743 + ], + "type": "text", + "content": " rewards. The straight-forward approach is to calculate the average reward each arm receives as its score and greedily select the one with the highest score. However, for those with few visits, the score fluctuates" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 763, + 309, + 771 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 763, + 309, + 771 + ], + "spans": [ + { + "bbox": [ + 302, + 763, + 309, + 771 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "spans": [ + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "type": "text", + "content": "and can be unreliable. Following the pessimism principle in RL, we introduce the lower-confidence-bound, which adds an uncertainty penalty to the score. Let " + }, + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "type": "inline_equation", + "content": "r_0, r_1, \\ldots, r_K" + }, + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "type": "text", + "content": " be the average reward of each answer and " + }, + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "type": "inline_equation", + "content": "N_0, N_1, \\ldots, N_K" + }, + { + "bbox": [ + 67, + 98, + 543, + 143 + ], + "type": "text", + "content": " be the number of solutions that drives to a certain answer. 
The selection algorithm is defined as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 226, + 144, + 542, + 169 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 144, + 542, + 169 + ], + "spans": [ + { + "bbox": [ + 226, + 144, + 542, + 169 + ], + "type": "interline_equation", + "content": "\\hat {a} := \\arg \\max _ {a _ {i}} \\left(r \\left(a _ {i}\\right) - \\alpha \\frac {\\ln (N M)}{N _ {i} M + 1}\\right) \\tag {1}", + "image_path": "308ded485fd606aed1a42b50ec1801fa34711d88d5f660645d0d588ba71d7474.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "spans": [ + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "type": "text", + "content": "where the parameter " + }, + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "type": "text", + "content": " is a hyper-parameter that balances the consideration of uncertainty in the decision-making process and " + }, + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "type": "inline_equation", + "content": "\\ln" + }, + { + "bbox": [ + 67, + 178, + 544, + 215 + ], + "type": "text", + "content": " is the natural logarithm. Intuitively, the first term reflects the signals from the verifier, while the second term accounts for the bias of solver in the answer space." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 220, + 541, + 274 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 82, + 220, + 541, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 220, + 541, + 243 + ], + "spans": [ + { + "bbox": [ + 82, + 220, + 541, + 243 + ], + "type": "text", + "content": "- When " + }, + { + "bbox": [ + 82, + 220, + 541, + 243 + ], + "type": "inline_equation", + "content": "N_{i}" + }, + { + "bbox": [ + 82, + 220, + 541, + 243 + ], + "type": "text", + "content": " is small, the second term dominates, which neglects the verification and in the extreme case, it collapses to Majority Voting." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 251, + 540, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 251, + 540, + 274 + ], + "spans": [ + { + "bbox": [ + 82, + 251, + 540, + 274 + ], + "type": "text", + "content": "- When " + }, + { + "bbox": [ + 82, + 251, + 540, + 274 + ], + "type": "inline_equation", + "content": "N_{i}" + }, + { + "bbox": [ + 82, + 251, + 540, + 274 + ], + "type": "text", + "content": " is large, the first term becomes more important, and in the extreme case, it simply selects the answer with the best verification score." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "spans": [ + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "type": "text", + "content": "The phase shift aligns with the fact that Majority Voting is trapped by the bias of the solver, for example when a wrong answer occurs more frequently than the correct one, and as " + }, + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 280, + 542, + 316 + ], + "type": "text", + "content": " grow large, the verification scores stabilize and we tend to trust them more, because verification is often easier than solving." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 329, + 165, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 329, + 165, + 342 + ], + "spans": [ + { + "bbox": [ + 67, + 329, + 165, + 342 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 351, + 141, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 351, + 141, + 363 + ], + "spans": [ + { + "bbox": [ + 67, + 351, + 141, + 363 + ], + "type": "text", + "content": "4.1 Dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 370, + 544, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 370, + 544, + 456 + ], + "spans": [ + { + "bbox": [ + 67, + 370, + 544, + 456 + ], + "type": "text", + "content": "Our experiments are on math problems. The training dataset comes from the AoPS website and official math competition homepages, similar to that of DAPO [28]. We leverage the DeepSeek-R1-Distill-Qwen-32B model as the policy model to generate 16 solutions to each problem. We use a rule-based program to check whether the final answer in a solution is correct: it compares the reference answer of a problem with the answer in the solution and outputs the label, i.e., 1 for a correct response and 0 for an incorrect response. We then construct the verification dataset with the prompt template in Table 1. To keep the prompt clean and short, we remove the `` part in each solution and only use the summary part." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 460, + 543, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 460, + 543, + 498 + ], + "spans": [ + { + "bbox": [ + 67, + 460, + 543, + 498 + ], + "type": "text", + "content": "We test the verification ability on both AIME2024 and AIME2025, 60 questions in total. During training, we monitor the performance on AIME2024 and select the best checkpoint as the final version of Heimdall. Therefore, one can treat AIME2024 as the validation dataset and AIME2025 as the test dataset."
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 507, + 218, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 218, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 218, + 521 + ], + "type": "text", + "content": "4.2 Scaling of verification" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 526, + 543, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 526, + 543, + 671 + ], + "spans": [ + { + "bbox": [ + 67, + 526, + 543, + 671 + ], + "type": "text", + "content": "Figure 2 shows the accuracy and the length of response tokens during RL training. As depicted by the blue curve, both accuracy and response length increase with the number of training steps, albeit at different rates. At the early stage, the accuracy improves rapidly, but the response length fluctuates. This is because a minor adjustment to the policy can significantly boost accuracy. Later, the response length grows constantly, while the accuracy gradually converges to " + }, + { + "bbox": [ + 67, + 526, + 543, + 671 + ], + "type": "inline_equation", + "content": "94.5\\%" + }, + { + "bbox": [ + 67, + 526, + 543, + 671 + ], + "type": "text", + "content": ", because the model is learning to tackle the hardest part in the training dataset, which requires the increasingly more reasoning tokens. The red curve represents the RL training without the data filtering strategy, i.e., incorporating both extreme cases of difficult and easy problems in the dataset. As training progresses, the performance gap becomes more pronounced, indicating that the absence of contrastive examples detrimentally impacts performance. In addition, we test o1-mini with the same evaluation data, which is shown as the dash line in the left of Figure 2. Our model outperforms o1-mini in fewer than 20 steps, indicating substantial potential for enhancing the verification capabilities of general-purpose reasoning models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 676, + 542, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 676, + 542, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 676, + 542, + 713 + ], + "type": "text", + "content": "We further look into cases to understand what Heimdall has learned during the training. Table 3 shows the verification of a correct solution to a hard problem in AIME2025. Due to space limitations, we only highlight some key points. We can observe two types of checking:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 82, + 718, + 541, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 718, + 541, + 743 + ], + "spans": [ + { + "bbox": [ + 82, + 718, + 541, + 743 + ], + "type": "text", + "content": "- Forward checking. It checks if the reasoning chain in every step of the solution is correct, which is generally applicable to all problems." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 763, + 309, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 763, + 309, + 772 + ], + "spans": [ + { + "bbox": [ + 302, + 763, + 309, + 772 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 97, + 299, + 269 + ], + "blocks": [ + { + "bbox": [ + 70, + 97, + 299, + 269 + ], + "lines": [ + { + "bbox": [ + 70, + 97, + 299, + 269 + ], + "spans": [ + { + "bbox": [ + 70, + 97, + 299, + 269 + ], + "type": "image", + "image_path": "8b644b76187ca40820b4d0310eeca3a8a74118f248c5eab10b3dc3316d3bdd02.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 280, + 541, + 314 + ], + "lines": [ + { + "bbox": [ + 67, + 280, + 541, + 314 + ], + "spans": [ + { + "bbox": [ + 67, + 280, + 541, + 314 + ], + "type": "text", + "content": "Figure 2 Accuracy and response length during RL training. PPO w/o data filtering is the RL training with all problems in the dataset. Left: the accuracy on AIME2024 with the training steps. Right: the response length on the training dataset with the training steps." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 301, + 97, + 541, + 269 + ], + "blocks": [ + { + "bbox": [ + 301, + 97, + 541, + 269 + ], + "lines": [ + { + "bbox": [ + 301, + 97, + 541, + 269 + ], + "spans": [ + { + "bbox": [ + 301, + 97, + 541, + 269 + ], + "type": "image", + "image_path": "707053fdd7c511e4dae86189152a9f568239d545b75fac1df3d40b0dcfd37403.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 335, + 543, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 335, + 543, + 371 + ], + "spans": [ + { + "bbox": [ + 83, + 335, + 543, + 371 + ], + "type": "text", + "content": "- Backward checking. It checks whether a conclusion, be it intermediate or final, fits the known constraints. For some types of problems like solving equations and finding the general term formula of a sequence, the backward checking is efficient and easy to implement." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 376, + 541, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 376, + 541, + 400 + ], + "spans": [ + { + "bbox": [ + 67, + 376, + 541, + 400 + ], + "type": "text", + "content": "The case exemplifies the common task of deriving a general formula for a sequence. As illustrated, Heimdall applies both methods of validation to confirm the correctness of the solution." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": "Next, we investigate how the verification ability scales as the number of verifications increases. 
We sample 64 solutions for each problem with the solver model and 64 verifications for each solution, resulting in a total of " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "30 \\times 64 \\times 64" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": " responses on either AIME2024 or AIME2025 dataset. Denoting the number of verifications of each solution as " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": ", we randomly select " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": " verifications for each solution from the data collected above, and determine the final score by some aggregation operation, e.g., Majority Voting and averaging. We repeat the process for 2048 times to eliminate any fluctuations in the statistics. Taking Majority Voting as the aggregation operation, we compute the accuracy, the false positive rate and the false negative rate at every compute budget " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": ", as is shown in the top of Figure 3. In addition, we take the average of " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": " scores, a decimal number in [0, 1], as the final score, and draw the curve of the AUC score in the bottom-left of Figure 3. It shows that Heimdall's performance can be significantly improved by simply repeat sampling more trajectories. As " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": " goes larger, the performance gradually converges to a upper limit. For example, the accuracy converges to about " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "97.5\\%" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": " on AIME2024 and " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "96.0\\%" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": " on AIME2025, and the remaining failure cases are the bias inherent in the model that could not be eliminated by adding more compute budget. We further analyze the distribution of those failure cases. For each problem, we calculate two statistics, one is the difficulty of solving it, which is estimated by the pass rate over its 64 solutions. and the other is the difficulty of verifying its solutions, which is estimated by the total number of verification failures on its solutions. Taking the two values as the x-axis and the y-axis respectively, we visualize their correlations in the bottom-right of Figure 3. We observe that the difficulty of a problem does not necessarily correlate to the difficulty of verifying its solutions. 
For example, Heimdall fails 17/64 times on a problem whose pass rate is " + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "inline_equation", + "content": "67\\%" + }, + { + "bbox": [ + 67, + 407, + 541, + 634 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "type": "text", + "content": "In addition, a clear observation is that the performance of Heimdall on AIME2025 is generally worse than that on AIME2024. We believe the main reason is that verifying solutions on AIME2025 is harder than verifying those on AIME2024. One piece of evidence is that o1-mini achieves " + }, + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "type": "inline_equation", + "content": "80.9\\%" + }, + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "type": "text", + "content": " in AIME2024 and " + }, + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "type": "inline_equation", + "content": "75.3\\%" + }, + { + "bbox": [ + 67, + 639, + 541, + 688 + ], + "type": "text", + "content": " in AIME2025, a larger degradation than that of our model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 698, + 334, + 712 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 698, + 334, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 698, + 334, + 712 + ], + "type": "text", + "content": "4.3 Scaling of problem solving with verification" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 718, + 541, + 742 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 718, + 541, + 742 + ], + "spans": [ + { + "bbox": [ + 67, + 718, + 541, + 742 + ], + "type": "text", + "content": "In terms of problem solving, repeated sampling [1] is known to scale with inference-time compute. We evaluate multiple scaling algorithms, both with and without the verifier. 
We analyze the inference" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 762, + 309, + 771 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 762, + 309, + 771 + ], + "spans": [ + { + "bbox": [ + 302, + 762, + 309, + 771 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 74, + 200, + 307, + 373 + ], + "blocks": [ + { + "bbox": [ + 74, + 200, + 307, + 373 + ], + "lines": [ + { + "bbox": [ + 74, + 200, + 307, + 373 + ], + "spans": [ + { + "bbox": [ + 74, + 200, + 307, + 373 + ], + "type": "image", + "image_path": "fe886d189f024c5120df42a013683bee8b158b3fc37ba3ee60cf7194f46f1c8d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 200, + 541, + 373 + ], + "blocks": [ + { + "bbox": [ + 309, + 200, + 541, + 373 + ], + "lines": [ + { + "bbox": [ + 309, + 200, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 309, + 200, + 541, + 373 + ], + "type": "image", + "image_path": "6f484e4be0cf4cc54219a65ea52ca98d6d801ffe275f018bf2ff3e34d42dd81f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 71, + 375, + 306, + 549 + ], + "blocks": [ + { + "bbox": [ + 71, + 375, + 306, + 549 + ], + "lines": [ + { + "bbox": [ + 71, + 375, + 306, + 549 + ], + "spans": [ + { + "bbox": [ + 71, + 375, + 306, + 549 + ], + "type": "image", + "image_path": "695975f877c08159c982c176f2bf6d3900e61dc49ccd1d75fb0027e1967b18eb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 560, + 542, + 639 + ], + "lines": [ + { + "bbox": [ + 67, + 560, + 542, + 639 + ], + "spans": [ + { + "bbox": [ + 67, + 560, + 542, + 639 + ], + "type": "text", + "content": "Figure 3 The inference-time scaling of verification ability on problem solutions in AIME2024 and AIME2025. Top-left: We show the accuracy of Heimdall when we sample multiple verification responses and make the judgment by majority voting. Top-right: We show the decreasing false-negative rate(FNR) and false-positive rate(FPR) as we scale up verification responses with majority voting. Bottom-left: We calculate the average score of verification responses and draw the AUC along each number of responses. Bottom-right: We collect the verification failure cases on every math problem and draw the relation between the difficulty of the problem and the number of verification failures, which reveals that the verification difficulty may not necessarily correlate with the difficulty of the original problem." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 375, + 541, + 549 + ], + "blocks": [ + { + "bbox": [ + 307, + 375, + 541, + 549 + ], + "lines": [ + { + "bbox": [ + 307, + 375, + 541, + 549 + ], + "spans": [ + { + "bbox": [ + 307, + 375, + 541, + 549 + ], + "type": "image", + "image_path": "88fa108dfa2c896417548184733701d2de540dc592cd6d34e1de0766047ec256.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 762, + 309, + 771 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 762, + 309, + 771 + ], + "spans": [ + { + "bbox": [ + 302, + 762, + 309, + 771 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 80, + 213, + 91, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 213, + 91, + 219 + ], + "spans": [ + { + "bbox": [ + 80, + 213, + 91, + 219 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 79, + 230, + 138, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 230, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 79, + 230, + 138, + 241 + ], + "type": "text", + "content": "**Problem**" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "spans": [ + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "type": "text", + "content": "Let the sequence of rationals " + }, + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "type": "inline_equation", + "content": "x_{1}, x_{2}, \\ldots" + }, + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "type": "text", + "content": " be defined such that " + }, + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "type": "inline_equation", + "content": "x_{1} = \\frac{25}{11}" + }, + { + "bbox": [ + 79, + 242, + 402, + 257 + ], + "type": "text", + "content": " and" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 245, + 264, + 363, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 264, + 363, + 291 + ], + "spans": [ + { + "bbox": [ + 245, + 264, + 363, + 291 + ], + "type": "interline_equation", + "content": "x _ {k + 1} = \\frac {1}{3} \\left(x _ {k} + \\frac {1}{x _ {k}} - 1\\right).", + "image_path": "69acd5d7fcf2b3e74e76843cb7a1a4b47c6975f3bcd6412a620278d4168a9258.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "spans": [ + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "inline_equation", + "content": "x_{2025}" + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "text", + "content": " can be expressed as " + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "inline_equation", + "content": "\\frac{m}{n}" + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "text", + "content": " for relatively prime positive integers " + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "inline_equation", + 
"content": "n" + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "text", + "content": ". Find the remainder when " + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "inline_equation", + "content": "m + n" + }, + { + "bbox": [ + 79, + 299, + 530, + 323 + ], + "type": "text", + "content": " is divided by 1000." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 334, + 137, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 334, + 137, + 346 + ], + "spans": [ + { + "bbox": [ + 79, + 334, + 137, + 346 + ], + "type": "text", + "content": "**Solution**" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 351, + 91, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 351, + 91, + 357 + ], + "spans": [ + { + "bbox": [ + 80, + 351, + 91, + 357 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 369, + 151, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 369, + 151, + 380 + ], + "spans": [ + { + "bbox": [ + 79, + 369, + 151, + 380 + ], + "type": "text", + "content": "**Verification**" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 382, + 119, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 382, + 119, + 392 + ], + "spans": [ + { + "bbox": [ + 79, + 382, + 119, + 392 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 79, + 393, + 529, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 393, + 529, + 417 + ], + "spans": [ + { + "bbox": [ + 79, + 393, + 529, + 417 + ], + "type": "text", + "content": "Okay, so I need to figure out if the given solution to this problem is correct. Let me read through it again to make sure I understand." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 418, + 164, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 418, + 164, + 429 + ], + "spans": [ + { + "bbox": [ + 80, + 418, + 164, + 429 + ], + "type": "text", + "content": "The problem is: ..." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 79, + 430, + 440, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 430, + 440, + 441 + ], + "spans": [ + { + "bbox": [ + 79, + 430, + 440, + 441 + ], + "type": "text", + "content": "The solution provided has four key steps. Let me go through each step and verify." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "spans": [ + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "content": "Step 1: They let " + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "inline_equation", + "content": "x_{k} = a_{k} / b_{k}" + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "inline_equation", + "content": "a_{k}" + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "inline_equation", + "content": "b_{k}" + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "content": " coprime. 
Then they write the recurrence relations as " + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "inline_equation", + "content": "a_{k + 1} = a_k^2 +b_k^2 -a_kb_k" + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "inline_equation", + "content": "b_{k + 1} = 3a_kb_k" + }, + { + "bbox": [ + 79, + 441, + 529, + 465 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "spans": [ + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "text", + "content": "Wait, let's verify that. Starting from " + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "inline_equation", + "content": "x_{k + 1} = (1 / 3)(x_k + 1 / x_k - 1)" + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "text", + "content": " . If " + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "inline_equation", + "content": "x_{k} = a_{k} / b_{k}" + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "text", + "content": " , then " + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "inline_equation", + "content": "1 / x_{k} = b_{k} / a_{k}" + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "text", + "content": " So " + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "inline_equation", + "content": "x_{k} + 1 / x_{k} = (a_{k}^{2} + b_{k}^{2}) / (a_{k}b_{k})" + }, + { + "bbox": [ + 79, + 465, + 530, + 491 + ], + "type": "text", + "content": " . Okay, so step 1 seems correct. [Forward checking]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 80, + 495, + 91, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 495, + 91, + 500 + ], + "spans": [ + { + "bbox": [ + 80, + 495, + 91, + 500 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "spans": [ + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "content": "Wait let me confirm with " + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "inline_equation", + "content": "S_{3}" + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "inline_equation", + "content": "a_{3} = a_{2}^{2} + b_{2}^{2} - a_{2}b_{2}" + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "content": ". Wait " + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "inline_equation", + "content": "a_{2} = 471" + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "inline_equation", + "content": "b_{2} = 825" + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "content": ". So " + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "inline_equation", + "content": "a_{3} = 471^{2} + 825^{2} - 471*825" + }, + { + "bbox": [ + 79, + 501, + 530, + 525 + ], + "type": "text", + "content": ". 
[Backward checking]" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 80, + 531, + 124, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 531, + 124, + 548 + ], + "spans": [ + { + "bbox": [ + 80, + 531, + 124, + 548 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 79, + 548, + 529, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 548, + 529, + 573 + ], + "spans": [ + { + "bbox": [ + 79, + 548, + 529, + 573 + ], + "type": "text", + "content": "The solution correctly arrives at the conclusion despite some initial confusion in their explanation. They properly computed each part modulo 1000, leading to the correct final result." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 80, + 574, + 126, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 574, + 126, + 583 + ], + "spans": [ + { + "bbox": [ + 80, + 574, + 126, + 583 + ], + "type": "text", + "content": "Answer: 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 607, + 541, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 607, + 541, + 640 + ], + "spans": [ + { + "bbox": [ + 67, + 607, + 541, + 640 + ], + "type": "text", + "content": "Table 2 A case of verification on hard problems. The verification process contains both forward checking, i.e., checking if each step is correct and backward checking, i.e., checking if a conclusion matches constraints, as is shown in the position of the red texts." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 763, + 308, + 771 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 763, + 308, + 771 + ], + "spans": [ + { + "bbox": [ + 302, + 763, + 308, + 771 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 99, + 269, + 278 + ], + "blocks": [ + { + "bbox": [ + 70, + 99, + 269, + 278 + ], + "lines": [ + { + "bbox": [ + 70, + 99, + 269, + 278 + ], + "spans": [ + { + "bbox": [ + 70, + 99, + 269, + 278 + ], + "type": "image", + "image_path": "a6ceaaf54bd7ad5fb8518d4d94c967fe0a5ccc33696d9d663f524fd60bc4d369.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 289, + 541, + 356 + ], + "lines": [ + { + "bbox": [ + 67, + 289, + 541, + 356 + ], + "spans": [ + { + "bbox": [ + 67, + 289, + 541, + 356 + ], + "type": "text", + "content": "Figure 4 The inference-time scaling of problem solving with Heimdall. The two figures show the accuracy on AIME datasets as the number of solutions scales up. Left: the problem solving accuracy on AIME2025 dataset scales with the number of solutions. The colored shaded area represents the area covered by the accuracy curves of a selection algorithm as the number of verifications increases from 1 to 64. Right: the contour map of the accuracy of Pessimistic Verification as the number of solutions (x-axis) and the number of verifications (y-axis) increase. The red curve indicates the optimal configurations within various overall compute budgets." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 271, + 99, + 539, + 278 + ], + "blocks": [ + { + "bbox": [ + 271, + 99, + 539, + 278 + ], + "lines": [ + { + "bbox": [ + 271, + 99, + 539, + 278 + ], + "spans": [ + { + "bbox": [ + 271, + 99, + 539, + 278 + ], + "type": "image", + "image_path": "be88d7c5a6c41fb2c49cdc172f640ecb7aa221ceb85fa1108b2a5cc20ca5ec0d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "spans": [ + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": "time scaling in two dimensions, i.e., the number of solutions " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": " and the number of verifications on each solution " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": ". Considering the huge computational cost, we only evaluate on AIME2025, with " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "N \\in [2, 256]" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "M \\in [1, 64]" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": ". Similar to the analysis of verification accuracy, we first sample " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "N = 256" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": " solutions for each problem and " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "M = 64" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": " verifications for each solution as the complete data and then randomly select a subset to evaluate each scaling algorithm under a specific compute budget " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "(M, N)" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": ". We repeat the sampling for 2048 time to get a stable average score. We set " + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "inline_equation", + "content": "\\alpha = 0.1" + }, + { + "bbox": [ + 67, + 376, + 541, + 461 + ], + "type": "text", + "content": " in Pessimistic Verification and also evaluate the other three selection algorithms as follows." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 474, + 541, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 474, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 67, + 474, + 541, + 523 + ], + "type": "text", + "content": "Majority Voting Majority voting is one of the most commonly used inference time scaling methods. It first categorize the solutions, e.g., by the final answers for math problems. It simply selects the category that contains the largest number of solutions in it. 
As fore-mentioned, majority voting can be seen as a special case of Pessimistic verification, where " + }, + { + "bbox": [ + 67, + 474, + 541, + 523 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 67, + 474, + 541, + 523 + ], + "type": "text", + "content": " is large enough to overshadow the signal of verification." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "spans": [ + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "content": "Shortest Majority Voting. The recent work [31] observes a length bias that the correct solutions are often shorter than incorrect ones for the same questions. Suppose the answer " + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "content": " occurs " + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "content": " times in the sampled responses and the average length of responses with the answer " + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "inline_equation", + "content": "l_i" + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "content": ", the voting score for " + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 67, + 536, + 541, + 573 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 287, + 580, + 323, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 287, + 580, + 323, + 602 + ], + "spans": [ + { + "bbox": [ + 287, + 580, + 323, + 602 + ], + "type": "interline_equation", + "content": "s _ {i} = \\frac {c _ {i}}{l _ {i}}", + "image_path": "a476a87e3ba114e924d43e8cd89486842f35e498c586712ba4ffa2219ac91973.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 616, + 541, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 616, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 541, + 677 + ], + "type": "text", + "content": "Sampling-based Search. The work [33] leverages a commercial LLM as the verifier, and scales the inference-time computation on the number of sampled solutions and the number of verifications. During the selection, it calculates the average verification score of each solution and selects the solution with the largest score. Note that it does not group the solutions based on their answers, which is different from the special case of Pessimistic verification where " + }, + { + "bbox": [ + 67, + 616, + 541, + 677 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 67, + 616, + 541, + 677 + ], + "type": "text", + "content": " equals zero." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 683, + 541, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 683, + 541, + 731 + ], + "spans": [ + { + "bbox": [ + 67, + 683, + 541, + 731 + ], + "type": "text", + "content": "Tie-breaking rules. 
The selection algorithm may encounter a tie situation, where multiple options have the same score. In principle, one can introduce another model to compare which option is better, but for simplicity, we leverage the length prior to break the tie, namely, selecting the option with the shortest average solution length." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 763, + 308, + 771 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 763, + 308, + 771 + ], + "spans": [ + { + "bbox": [ + 302, + 763, + 308, + 771 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": "The left of Figure 4 shows how the accuracy of different scaling algorithms changes with the number of solutions " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": ". Majority voting is the worst among all scaling algorithms under the same " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": ". By employing the length prior, Shortest Majority Voting gives a better accuracy when " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": " is small, but finally converges to " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": ", the same as that of Majority Voting. The red and blue areas represent the areas covered by the group of accuracy curves with " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "M \\in [1,64]" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": " of Sampling-based Search and Pessimistic Verification respectively. As the figure shows, with the help of verification, the accuracy is significantly improved. In addition, Pessimistic Verification is better than Sampling-based Search when " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": " is small. The reason is that the verification process is inherently probabilistic. Even for an easy task, it is still possible that a wrong solution is judged as correct and is finally selected as the final solution. With the second term in Equation 1, Pessimistic Verification penalizes such uncertain cases and favors those with more visits. As " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": " grows larger, the verification scores stabilize and the second term becomes smaller, and the gap between the two algorithms gets smaller. 
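To make the four selection rules above concrete, here is a minimal Python sketch that implements them side by side. The solution representation and field names are illustrative assumptions, and the alpha / sqrt(count) uncertainty penalty is only a stand-in for the paper's Equation 1 (not reproduced here); only the voting score s_i = c_i / l_i and the use of the mean verification score come directly from the text.

```python
from collections import defaultdict
from statistics import mean

# Each candidate solution is a dict; the field names ("answer", "length",
# "verif_scores") are illustrative, not taken from the paper.

def group_by_answer(solutions):
    groups = defaultdict(list)
    for s in solutions:
        groups[s["answer"]].append(s)
    return groups

def majority_voting(solutions):
    # pick the answer backed by the largest number of solutions
    groups = group_by_answer(solutions)
    return max(groups, key=lambda a: len(groups[a]))

def shortest_majority_voting(solutions):
    # score each answer by s_i = c_i / l_i (vote count over average length)
    groups = group_by_answer(solutions)
    return max(groups, key=lambda a: len(groups[a]) / mean(s["length"] for s in groups[a]))

def sampling_based_search(solutions):
    # no grouping: return the answer of the single solution with the
    # highest average verification score
    return max(solutions, key=lambda s: mean(s["verif_scores"]))["answer"]

def pessimistic_verification(solutions, alpha=0.1):
    # group by answer and penalize answers whose verification evidence is thin;
    # the alpha / sqrt(#verifications) penalty is an assumed form standing in
    # for the paper's Equation 1
    groups = group_by_answer(solutions)
    def score(answer):
        verdicts = [v for s in groups[answer] for v in s["verif_scores"]]
        return mean(verdicts) - alpha / len(verdicts) ** 0.5
    return max(groups, key=score)
```

Ties between equally scored options could then be broken by the shortest average solution length, as described in the tie-breaking rule above.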
Interestingly, when " + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 98, + 543, + 301 + ], + "type": "text", + "content": " is large, the gap is large again. By checking the typical cases, we find that it comes from the grouping of solutions. Pessimistic Verification aggregates the solutions by their answers while Sampling-based Search treats each solution independently. Two solutions may have the same final answer, but their approaches or expressions can be entirely different. The aggregation takes this variance into consideration and thus makes a more robust selection. Note that such grouping is not generally applicable, e.g., grouping the solutions of proof problems is not straightforward. In those cases, we expect the two algorithms to converge to the same limit." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": "Taking the accuracy of Pessimistic Verification as a function of " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": ", we draw the contour map in the right of Figure 4. " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "M = 0" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " represents the vanilla Majority Voting without verifications. We can see that increasing either " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " improves the performance. Each point " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "(M, N)" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " involves " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " responses by the solver model and " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "M \\times N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " responses by the verifier, which is " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N \\times (M + 1)" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " responses in total. 
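As a quick sanity check on the compute accounting just stated: a configuration with N solutions and M verifications per solution costs N solver responses plus N x M verifier responses, i.e. N(M + 1) responses in total. The sketch below uses a hypothetical accuracy table standing in for the measured contour map, simply to illustrate how a compute-optimal (N, M) could be read off under a fixed response budget.

```python
def total_responses(n_solutions, m_verifications):
    # N solver responses plus M verifier responses for each of the N solutions
    return n_solutions * (m_verifications + 1)

def compute_optimal(accuracy, budget):
    """accuracy: dict mapping (N, M) -> measured accuracy (hypothetical grid);
    returns the best configuration affordable under the response budget."""
    feasible = [(cfg, acc) for cfg, acc in accuracy.items()
                if total_responses(*cfg) <= budget]
    return max(feasible, key=lambda item: item[1])[0] if feasible else None
```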
By minimizing the overall budget, we derive the compute-optimal configurations for different compute budgets, with the constraint that " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " are non-negative integers. As shown in the figure, we need to alternately increase " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": ", but we should increase " + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 543, + 403 + ], + "type": "text", + "content": " more frequently. The reason is that the correct answer for a hard problem is sparse. To get the problem solved with a scaling algorithm, we first need to give a sufficient budget to get the correct answer." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "spans": [ + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": "Remaining room for improvement. Can Heimdall be better? The black dashed curve in the left of Figure 4 is Best-of-N that selects the response that equals the ground-truth answer, which is the upper limit of any scaling algorithm. When " + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": " is small, Pessimistic Verification performs near the upper limit, but the gap widens as " + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": " increases. Consider the configuration " + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "inline_equation", + "content": "N = 256" + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "inline_equation", + "content": "M = 64" + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": ". Pessimistic Verification gets a score of " + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "inline_equation", + "content": "83.3\\%" + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": " and the upper limit is " + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "inline_equation", + "content": "93.3\\%" + }, + { + "bbox": [ + 67, + 409, + 543, + 529 + ], + "type": "text", + "content": ", so the gap is 3 problems. Looking into the individual problems, we find that there are 4 problems that have only one correct solution among the 256 solutions. Heimdall manages to identify the correct solution on one of them, which is the case mentioned in Table 3, but fails on the other three problems. 
The three failed problems involve spatial reasoning, which the base model of Heimdall is not very skilled at. We believe that as the ability of the base model improves, the verification ability can reach the upper limit." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "spans": [ + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "type": "text", + "content": "Coordination with other solver models. In the previous experiments, we use DeepSeek-R1-Distill-Qwen-32B as the solver model, which is the model used to collect data during the training phase. To test Heimdall's generalization to other solver models, we test on DeepSeek-R1 and Gemini 2.5 Pro. For DeepSeek-R1, we directly extract the summary in its response, while for Gemini 2.5 Pro, we leverage another LLM to summarize its solution because we observe that its responses contain many reflections, which might confuse the verification process. Due to the limited compute budget, we set the total number of verifications to 16 and the total number of solving attempts to 64. We find that the verification accuracy on DeepSeek-R1 and Gemini 2.5 Pro is " + }, + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "type": "inline_equation", + "content": "90.1\\%" + }, + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "type": "inline_equation", + "content": "89.9\\%" + }, + { + "bbox": [ + 67, + 535, + 543, + 679 + ], + "type": "text", + "content": ", respectively, close to that on DeepSeek-R1-Distill-Qwen-32B. The scaling in problem-solving, as shown in the right of Figure 1, indicates that Heimdall consistently improves the accuracy with various solver models, and using stronger solver models results in higher accuracy. Meanwhile, the improvements for Gemini 2.5 Pro are smaller compared to those of the other models. The reason is that the accuracy is already high on AIME2025 and the base model of Heimdall inherently lacks some ability on certain problems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 691, + 322, + 705 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 691, + 322, + 705 + ], + "spans": [ + { + "bbox": [ + 67, + 691, + 322, + 705 + ], + "type": "text", + "content": "5 Generalization to math proof problems" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 715, + 543, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 715, + 543, + 739 + ], + "spans": [ + { + "bbox": [ + 67, + 715, + 543, + 739 + ], + "type": "text", + "content": "Heimdall is trained on math problems with explicit answers because it is easy to determine the correctness of the solution. However, there are many other types of problems, some of which might be more challenging."
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 762, + 311, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 762, + 311, + 772 + ], + "spans": [ + { + "bbox": [ + 300, + 762, + 311, + 772 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 205, + 530, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 205, + 530, + 242 + ], + "spans": [ + { + "bbox": [ + 79, + 205, + 530, + 242 + ], + "type": "text", + "content": "Here is a math problem and a solution of it. Think step by step and verify if each proof step in solution is correct. The last line of your response should be of the form Answer: $Answer (without quotes) where $Answer is 1 if the solution is correct and 0 if incorrect." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 79, + 252, + 138, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 252, + 138, + 263 + ], + "spans": [ + { + "bbox": [ + 79, + 252, + 138, + 263 + ], + "type": "text", + "content": "**Problem**" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 265, + 529, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 265, + 529, + 289 + ], + "spans": [ + { + "bbox": [ + 79, + 265, + 529, + 289 + ], + "type": "text", + "content": "The n players of a hockey team gather to select their team captain. Initially, they stand in a circle, and each person votes for the person on their left." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 289, + 530, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 289, + 530, + 373 + ], + "spans": [ + { + "bbox": [ + 79, + 289, + 530, + 373 + ], + "type": "text", + "content": "The players will update their votes via a series of rounds. In one round, each player updates their vote, one at a time, according to the following procedure: At the time of the update, if a is voting for b, and b is voting for c, then a updates their vote to c. (Note that a, b, and c need not be distinct; if " + }, + { + "bbox": [ + 79, + 289, + 530, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{b} = \\mathrm{c}" + }, + { + "bbox": [ + 79, + 289, + 530, + 373 + ], + "type": "text", + "content": ", then a's vote does not change for this update.) Every player updates their vote exactly once in each round, in an order determined by the players (possibly different across different rounds). They repeat this updating procedure for n rounds. Prove that at this time, all n players will unanimously vote for the same person." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 384, + 138, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 384, + 138, + 408 + ], + "spans": [ + { + "bbox": [ + 79, + 384, + 138, + 408 + ], + "type": "text", + "content": "\\*\\*Solution\\*\\* ..." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 419, + 151, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 419, + 151, + 443 + ], + "spans": [ + { + "bbox": [ + 79, + 419, + 151, + 443 + ], + "type": "text", + "content": "**Verification**" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 452, + 529, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 452, + 529, + 503 + ], + "spans": [ + { + "bbox": [ + 79, + 452, + 529, + 503 + ], + "type": "text", + "content": "Wait, but since the solution didn't present this reasoning, their proof is flawed. They presented an incorrect model of the process, leading to an overestimation of the rounds needed, but the conclusion is still correct. However, the problem is to verify if each step in their solution is correct. Since their step 4's reasoning is wrong, their proof is incorrect." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 508, + 124, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 508, + 124, + 525 + ], + "spans": [ + { + "bbox": [ + 79, + 508, + 124, + 525 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "spans": [ + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "type": "text", + "content": "The solution's step 4 incorrectly assumes that each person's vote jumps " + }, + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "type": "text", + "content": " times, leading to covering the entire ring. The correct approach is to note that each round reduces the number of distinct targets by one, leading to convergence in " + }, + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "type": "inline_equation", + "content": "n-1" + }, + { + "bbox": [ + 79, + 526, + 529, + 573 + ], + "type": "text", + "content": " rounds. Thus, while the conclusion is correct, the reasoning in step 4 is flawed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 80, + 575, + 127, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 575, + 127, + 586 + ], + "spans": [ + { + "bbox": [ + 80, + 575, + 127, + 586 + ], + "type": "text", + "content": "Answer: 0" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 609, + 541, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 609, + 541, + 643 + ], + "spans": [ + { + "bbox": [ + 67, + 609, + 541, + 643 + ], + "type": "text", + "content": "Table 3 A case of verification on the math proof problems. The problem is P1 in Canadian Mathematical Olympiad 2025. We modify the prompt template to check the proof process rather than the final answer, which is the red text. Heimdall checks the proof step by step and finds that the solution lacks rigorous proofs in step 4." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 762, + 310, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 762, + 310, + 772 + ], + "spans": [ + { + "bbox": [ + 299, + 762, + 310, + 772 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 98, + 543, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 98, + 543, + 278 + ], + "spans": [ + { + "bbox": [ + 67, + 98, + 543, + 278 + ], + "type": "text", + "content": "In this section, we would like to test Heimdall's capability in verifying mathematical proof problems. We select 10 proof problems from Mathematical Olympiads of different countries from the years 2024 and 2025, and leverage a solver model, i.e., DeepSeek-R1-Distill-Qwen-32B, to generate a proof process for each problem. Considering that the solver model is not good at spatial reasoning, we do not select geometry-related problems. Heimdall is then employed to check the correctness of each proof. Finally, we have experts evaluate both the proof processes and Heimdall's verifications. The solver model correctly solves 2 problems, while the remaining 8 are incorrect. To our surprise, Heimdall correctly judges 9/10 cases, identifying 2/2 correct proofs and detecting issues in 7/8 incorrect proofs. There is 1 problem where Heimdall fails to identify the error in the proof, resulting in a false-negative judgment. Looking into the specific cases, we find that Heimdall judges the correctness with both forward and backward checking, e.g., checking each step and testing with examples. It is capable of identifying most errors or unproven assumptions in the solution, but for some subtle problems, e.g., when an assumption does not appear in the form of a proposition but is implicitly made during the proof process, Heimdall might fail. We believe that introducing proof data into the RL training would improve the performance of Heimdall, and an important future direction is how to generate such a dataset at scale." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 291, + 372, + 305 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 291, + 372, + 305 + ], + "spans": [ + { + "bbox": [ + 67, + 291, + 372, + 305 + ], + "type": "text", + "content": "6 Verification on automatic knowledge discovery" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 314, + 543, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 314, + 543, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 314, + 543, + 458 + ], + "type": "text", + "content": "In the process of human exploration of the unknown, some scientists pose questions, some propose solutions to these questions, and others focus on verifying the correctness of solutions provided by their predecessors. Verification itself, as a crucial part of knowledge discovery, ensures the correctness of new knowledge. In this section, we design a prototype that simulates the stages of posing questions and solving them, using the synthesis of math problems by NuminaMath [12], to evaluate Heimdall's effectiveness in detecting problematic knowledge. NuminaMath open-sources a comprehensive collection of 860,000 pairs of math problems and reference solutions. 
It includes 229,982 MATH-level pairs and 62,108 AMC-AIME-level pairs that are synthesized from seed problems in the MATH and AMC-AIME training datasets. We test Heimdall on the harder one, i.e., the AMC-AIME dataset. Flaws can exist either within the problem itself, such as an unsolvable problem, or within the solution provided. Theoretically, by checking if the solution satisfies all the requirements in the problem, Heimdall can detect both flaws. Meanwhile, the task also indirectly tests the generalization capabilities of Heimdall, as the quality of the problems in the training set is much higher." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 464, + 289, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 464, + 289, + 726 + ], + "spans": [ + { + "bbox": [ + 67, + 464, + 289, + 726 + ], + "type": "text", + "content": "We randomly sample 8,192 pairs of questions and solutions as the test set. For each pair, we construct the prompt and query Heimdall 8 times. We calculate the sum of the verification scores, which ranges from 0 to 8, and illustrate the distribution in Figure 5. As shown, nearly half of the data is labeled incorrect with high confidence, which is consistent with the experience reported on the latest NuminaMath-1.5 website [13]: in an ablation study, the authors find that this dataset slightly hurts the performance and plan to remove all synthetic data until they find a way to reliably generate high-quality synthetic problems. To measure the verification correctness, we randomly select 10 cases in the 0-scored group and manually check their correctness. We find that for all cases, the judgments by Heimdall are correct. Table 4 shows a case of the problematic data and the verification. Due to space limitations, we retain only the essential information, with the rest omitted. The question does not have the correct answer among its options. Meanwhile, the solution mistakenly" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 305, + 479, + 539, + 653 + ], + "blocks": [ + { + "bbox": [ + 305, + 479, + 539, + 653 + ], + "lines": [ + { + "bbox": [ + 305, + 479, + 539, + 653 + ], + "spans": [ + { + "bbox": [ + 305, + 479, + 539, + 653 + ], + "type": "image", + "image_path": "2f73438f093255b5510d6470fa706f84aab4ade82b7a97970530daabc2a053ef.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 662, + 542, + 707 + ], + "lines": [ + { + "bbox": [ + 302, + 662, + 542, + 707 + ], + "spans": [ + { + "bbox": [ + 302, + 662, + 542, + 707 + ], + "type": "text", + "content": "Figure 5 The distribution of verification scores on the problems of a synthetic dataset. The x-axis is the sum of scores across 8 verifications and the y-axis is the number of problems corresponding to each sum." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 727, + 541, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 727, + 541, + 738 + ], + "spans": [ + { + "bbox": [ + 67, + 727, + 541, + 738 + ], + "type": "text", + "content": "uses an incorrect value in the equation and interestingly arrives at one of the given options. 
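A minimal sketch of the flaw-detection protocol described above, assuming a `verify` callable that returns a 0/1 verdict from the verifier model; the flagging threshold is an illustrative choice mirroring the 0-scored group that was manually inspected.

```python
def score_pair(problem, solution, verify, n_queries=8):
    # sum of 8 binary verdicts, giving a score from 0 to 8 as in Figure 5
    return sum(verify(problem, solution) for _ in range(n_queries))

def flag_suspect_pairs(pairs, verify, threshold=0):
    # pairs scored at or below the threshold are candidates for manual review,
    # like the 0-scored group inspected in the text
    return [(p, s, score) for p, s in pairs
            if (score := score_pair(p, s, verify)) <= threshold]
```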
We hypothesize" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 762, + 311, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 762, + 311, + 772 + ], + "spans": [ + { + "bbox": [ + 300, + 762, + 311, + 772 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 80, + 169, + 118, + 181 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 169, + 118, + 181 + ], + "spans": [ + { + "bbox": [ + 80, + 169, + 118, + 181 + ], + "type": "text", + "content": "Prompt:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 191, + 531, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 191, + 531, + 217 + ], + "spans": [ + { + "bbox": [ + 78, + 191, + 531, + 217 + ], + "type": "text", + "content": "Here is a math problem and a solution of it. Think step by step and verify if the final answer in the solution is correct. ..." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 227, + 138, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 227, + 138, + 238 + ], + "spans": [ + { + "bbox": [ + 78, + 227, + 138, + 238 + ], + "type": "text", + "content": "**Problem**" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "spans": [ + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "text", + "content": "A rabbit starts jumping from the point " + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "inline_equation", + "content": "(2,3)" + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "text", + "content": " on a rectangular board that extends from " + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "inline_equation", + "content": "(0,0)" + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "inline_equation", + "content": "(6,5)" + }, + { + "bbox": [ + 78, + 239, + 531, + 298 + ], + "type": "text", + "content": ". Each jump it makes is either left, right, up, or down, and has a length of 1 unit. Each jump direction is uniformly and randomly chosen. The jumping sequence continues until the rabbit reaches any border of the rectangle. Calculate the probability that the rabbit's sequence of jumps ends on the top or bottom side of the rectangle." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "spans": [ + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "text", + "content": "(A). " + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "inline_equation", + "content": "\\frac{1}{4}" + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "text", + "content": " (B). " + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "inline_equation", + "content": "\\frac{17}{32}" + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "text", + "content": " (C). " + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "inline_equation", + "content": "\\frac{17}{24}" + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "text", + "content": " (D). 
" + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "inline_equation", + "content": "\\frac{1}{3}" + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "text", + "content": " (E). " + }, + { + "bbox": [ + 79, + 299, + 223, + 312 + ], + "type": "inline_equation", + "content": "\\frac{3}{4}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 322, + 137, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 322, + 137, + 333 + ], + "spans": [ + { + "bbox": [ + 79, + 322, + 137, + 333 + ], + "type": "text", + "content": "**Solution**" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "spans": [ + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "inline_equation", + "content": "P_{(x,y)}" + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "text", + "content": " denote the probability of the rabbit ending on the top or bottom side starting from " + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "text", + "content": ". Using symmetry about the center line " + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "inline_equation", + "content": "y = 2.5" + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "inline_equation", + "content": "x = 3" + }, + { + "bbox": [ + 78, + 334, + 531, + 369 + ], + "type": "text", + "content": ", and recursive relations, we analyze the movements." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 80, + 376, + 91, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 376, + 91, + 381 + ], + "spans": [ + { + "bbox": [ + 80, + 376, + 91, + 381 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 382, + 529, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 382, + 529, + 407 + ], + "spans": [ + { + "bbox": [ + 78, + 382, + 529, + 407 + ], + "type": "text", + "content": "Solving Equations: We solve this using recursive approaches and considering border cases explicitly. Assuming symmetry simplifies " + }, + { + "bbox": [ + 78, + 382, + 529, + 407 + ], + "type": "inline_equation", + "content": "P_{(3,2)} = P_{(3,3)}" + }, + { + "bbox": [ + 78, + 382, + 529, + 407 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "spans": [ + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "type": "text", + "content": "Here, let's consider only two key points " + }, + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "type": "inline_equation", + "content": "(2,3)" + }, + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "type": "inline_equation", + "content": "(3,3)" + }, + { + "bbox": [ + 78, + 407, + 426, + 418 + ], + "type": "text", + "content": " for simplicity. 
Solving:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 210, + 426, + 400, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 426, + 400, + 449 + ], + "spans": [ + { + "bbox": [ + 210, + 426, + 400, + 449 + ], + "type": "interline_equation", + "content": "P _ {(2, 3)} = \\frac {1}{4} P _ {(2, 2)} + \\frac {1}{4} (1) + \\frac {1}{4} P _ {(1, 3)} + \\frac {1}{4} P _ {(3, 3)}", + "image_path": "18a064ca0d0b73a8fb4229b47b7d2a7fe6336ab43b4b51442dac85fa57e6e4fd.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 79, + 461, + 91, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 461, + 91, + 466 + ], + "spans": [ + { + "bbox": [ + 79, + 461, + 91, + 466 + ], + "type": "text", + "content": "··" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 78, + 466, + 531, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 466, + 531, + 509 + ], + "spans": [ + { + "bbox": [ + 78, + 466, + 531, + 509 + ], + "type": "text", + "content": "Thus, the rabbit's sequence of jumps ends on the top or bottom side with a probability of " + }, + { + "bbox": [ + 78, + 466, + 531, + 509 + ], + "type": "inline_equation", + "content": "\\frac{17}{24}" + }, + { + "bbox": [ + 78, + 466, + 531, + 509 + ], + "type": "text", + "content": ". The final answer is (C) " + }, + { + "bbox": [ + 78, + 466, + 531, + 509 + ], + "type": "inline_equation", + "content": "\\boxed{\\frac{17}{24}}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 80, + 517, + 127, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 517, + 127, + 528 + ], + "spans": [ + { + "bbox": [ + 80, + 517, + 127, + 528 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 79, + 539, + 120, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 539, + 120, + 551 + ], + "spans": [ + { + "bbox": [ + 79, + 539, + 120, + 551 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 80, + 558, + 91, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 558, + 91, + 563 + ], + "spans": [ + { + "bbox": [ + 80, + 558, + 91, + 563 + ], + "type": "text", + "content": "···" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 79, + 564, + 124, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 564, + 124, + 574 + ], + "spans": [ + { + "bbox": [ + 79, + 564, + 124, + 574 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "spans": [ + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": "The solution provided incorrectly assumes that " + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "inline_equation", + "content": "\\mathrm{P}(2,4) = 1" + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": ", which is not true since (2,4) is not a top or bottom border. 
They oversimplified the equations by not accounting for the correct dependencies of other points like " + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "inline_equation", + "content": "\\mathrm{P}(2,4)" + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": " and mishandled the symmetry in y-direction due to different boundary conditions " + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "inline_equation", + "content": "(\\mathrm{P} = 0" + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "inline_equation", + "content": "\\mathrm{y} = 0" + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "inline_equation", + "content": "\\mathrm{P} = 1" + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "inline_equation", + "content": "\\mathrm{y} = 5" + }, + { + "bbox": [ + 78, + 575, + 531, + 623 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 80, + 624, + 127, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 624, + 127, + 634 + ], + "spans": [ + { + "bbox": [ + 80, + 624, + 127, + 634 + ], + "type": "text", + "content": "Answer: 0" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 657, + 542, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 657, + 542, + 681 + ], + "spans": [ + { + "bbox": [ + 67, + 657, + 542, + 681 + ], + "type": "text", + "content": "Table 4 An example of problematic synthetic data and the verification. The verification correctly points out the problem in the solution." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 762, + 312, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 762, + 312, + 772 + ], + "spans": [ + { + "bbox": [ + 299, + 762, + 312, + 772 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 99, + 542, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 99, + 542, + 124 + ], + "spans": [ + { + "bbox": [ + 67, + 99, + 542, + 124 + ], + "type": "text", + "content": "that the task is somewhat challenging for the LLM used for synthetic data generation, leading the LLM to hallucinate a superficially consistent but wrong output." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 135, + 165, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 135, + 165, + 148 + ], + "spans": [ + { + "bbox": [ + 67, + 135, + 165, + 148 + ], + "type": "text", + "content": "7 Future Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 158, + 544, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 158, + 544, + 303 + ], + "spans": [ + { + "bbox": [ + 67, + 158, + 544, + 303 + ], + "type": "text", + "content": "Firstly, the verification dataset is formed by simply extracting the summary part of a reasoning model, which as we observed is sometimes overly brief, e.g., driving to an answer with only one sentence. A more detailed explanation would make the verification easier. 
One can further improve the verification accuracy by summarizing the reasoning process with another LLM. Secondly, we mainly evaluate the verification ability on math problems that have final answers. There are many other types of tasks, e.g., coding problems and mathematical proof problems. Although the learned ability is generalizable to other domains, we expect it to be beneficial to train with data from other domains. For example, in the context of coding problems, backward checking may take the alternative form of designing test cases. Lastly, we only prototype the usage of Heimdall in automatic knowledge discovery. In real scenarios, posing valuable questions is a challenging task that demands both curiosity and keen insight. Such ability is often a critical part of scientific discovery, yet it is seldom investigated. We believe that as the general capabilities of LLMs continue to advance, this direction will become more and more important." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 315, + 155, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 315, + 155, + 328 + ], + "spans": [ + { + "bbox": [ + 67, + 315, + 155, + 328 + ], + "type": "text", + "content": "8 Conclusion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 338, + 544, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 338, + 544, + 436 + ], + "spans": [ + { + "bbox": [ + 67, + 338, + 544, + 436 + ], + "type": "text", + "content": "In this paper, we propose to train a long CoT verifier called Heimdall with reinforcement learning. On competitive math problems, Heimdall achieves high accuracy and scales well along both the length of reasoning chains and the number of repeated generations. Through human evaluation, we find that Heimdall also shows impressive generalization ability on out-of-domain problems, such as math proofs. We further propose the inference-time scaling algorithm called Pessimistic Verification, which incorporates a solver and Heimdall for problem solving. By scaling up the compute, we can achieve performance comparable to top-tier models on challenging math problems. Lastly, we design a prototype of automatic knowledge discovery and demonstrate that Heimdall can reliably detect flaws in the synthetic data from another LLM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 447, + 201, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 447, + 201, + 462 + ], + "spans": [ + { + "bbox": [ + 67, + 447, + 201, + 462 + ], + "type": "text", + "content": "9 Acknowledgments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 470, + 542, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 470, + 542, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 470, + 542, + 495 + ], + "type": "text", + "content": "We thank the data annotation team for their expertise in collecting the evaluation data and analyzing the verification outputs, including Bocheng Zhou, Weijian Zhao, Tong Sun and Zhiyuan Zhang."
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 762, + 312, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 762, + 312, + 772 + ], + "spans": [ + { + "bbox": [ + 300, + 762, + 312, + 772 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 96, + 137, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 96, + 137, + 108 + ], + "spans": [ + { + "bbox": [ + 69, + 96, + 137, + 108 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 119, + 544, + 727 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 73, + 119, + 544, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 119, + 544, + 152 + ], + "spans": [ + { + "bbox": [ + 73, + 119, + 544, + 152 + ], + "type": "text", + "content": "[1] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 73, + 157, + 543, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 157, + 543, + 192 + ], + "spans": [ + { + "bbox": [ + 73, + 157, + 543, + 192 + ], + "type": "text", + "content": "[2] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 73, + 197, + 543, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 197, + 543, + 230 + ], + "spans": [ + { + "bbox": [ + 73, + 197, + 543, + 230 + ], + "type": "text", + "content": "[3] Google. Gemini 2.5: Our most intelligent ai model, 2025. URL https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking. Accessed: 2025-03-25." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 236, + 543, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 236, + 543, + 248 + ], + "spans": [ + { + "bbox": [ + 73, + 236, + 543, + 248 + ], + "type": "text", + "content": "[4] Grok. Grok 3 beta — the age of reasoning agents, 2025. URL https://x.ai/news/grok-3. Accessed: 2025-02-19." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 253, + 543, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 253, + 543, + 277 + ], + "spans": [ + { + "bbox": [ + 73, + 253, + 543, + 277 + ], + "type": "text", + "content": "[5] Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, et al. A survey on lmm-as-a-judge. arXiv preprint arXiv:2411.15594, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 281, + 543, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 281, + 543, + 315 + ], + "spans": [ + { + "bbox": [ + 73, + 281, + 543, + 315 + ], + "type": "text", + "content": "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 319, + 543, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 319, + 543, + 343 + ], + "spans": [ + { + "bbox": [ + 73, + 319, + 543, + 343 + ], + "type": "text", + "content": "[7] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 73, + 347, + 543, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 347, + 543, + 371 + ], + "spans": [ + { + "bbox": [ + 73, + 347, + 543, + 371 + ], + "type": "text", + "content": "[8] Minki Kang, Jongwon Jeong, and Jaewoong Cho. T1: Tool-integrated self-verification for test-time compute scaling in small language models, 2025. URL https://arxiv.org/abs/2504.04718." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 375, + 543, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 375, + 543, + 408 + ], + "spans": [ + { + "bbox": [ + 73, + 375, + 543, + 408 + ], + "type": "text", + "content": "[9] Tian Lan, Wenwei Zhang, Chengqi Lyu, Shuaibin Li, Chen Xu, Heyan Huang, Dahua Lin, Xian-Ling Mao, and Kai Chen. Training language models to critique with multi-agent feedback. arXiv preprint arXiv:2410.15287, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 415, + 543, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 415, + 543, + 448 + ], + "spans": [ + { + "bbox": [ + 69, + 415, + 543, + 448 + ], + "type": "text", + "content": "[10] Tian Lan, Wenwei Zhang, Chen Xu, Heyan Huang, Dahua Lin, Kai Chen, and Xian-Ling Mao. Criticeval: Evaluating large-scale language model as critic. Advances in Neural Information Processing Systems, 37:66907-66960, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 453, + 543, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 453, + 543, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 453, + 543, + 487 + ], + "type": "text", + "content": "[11] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 492, + 543, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 492, + 543, + 526 + ], + "spans": [ + { + "bbox": [ + 69, + 492, + 543, + 526 + ], + "type": "text", + "content": "[12] Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions. Hugging Face repository, 13:9, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 530, + 543, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 530, + 543, + 576 + ], + "spans": [ + { + "bbox": [ + 69, + 530, + 543, + 576 + ], + "type": "text", + "content": "[13] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-1.5](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 580, + 543, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 580, + 543, + 604 + ], + "spans": [ + { + "bbox": [ + 69, + 580, + 543, + 604 + ], + "type": "text", + "content": "[14] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. Criticbench: Benchmarking llms for critique-correct reasoning. arXiv preprint arXiv:2402.14809, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 609, + 543, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 609, + 543, + 632 + ], + "spans": [ + { + "bbox": [ + 69, + 609, + 543, + 632 + ], + "type": "text", + "content": "[15] Liangchen Luo, Zi Lin, Yinxiao Liu, Lei Shu, Yun Zhu, Jingbo Shang, and Lei Meng. Critique ability of large language models. arXiv preprint arXiv:2310.04815, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 636, + 543, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 636, + 543, + 669 + ], + "spans": [ + { + "bbox": [ + 69, + 636, + 543, + 669 + ], + "type": "text", + "content": "[16] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. S²r: Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 675, + 543, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 543, + 699 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 543, + 699 + ], + "type": "text", + "content": "[17] Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 703, + 543, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 703, + 543, + 727 + ], + "spans": [ + { + "bbox": [ + 69, + 703, + 543, + 727 + ], + "type": "text", + "content": "[18] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 762, + 311, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 762, + 311, + 772 + ], + "spans": [ + { + "bbox": [ + 299, + 762, + 311, + 772 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 98, + 544, + 645 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 69, + 98, + 544, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 98, + 544, + 133 + ], + "spans": [ + { + "bbox": [ + 69, + 98, + 544, + 133 + ], + "type": "text", + "content": "[19] Qian Pan, Zahra Ashktorab, Michael Desmond, Martin Santillan Cooper, James Johnson, Rahul Nair, Elizabeth Daly, and Werner Geyer. Human-centered design recommendations for lmm-as-a-judge. arXiv preprint arXiv:2407.03479, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 137, + 543, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 137, + 543, + 161 + ], + "spans": [ + { + "bbox": [ + 67, + 137, + 543, + 161 + ], + "type": "text", + "content": "[20] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 165, + 542, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 165, + 542, + 190 + ], + "spans": [ + { + "bbox": [ + 69, + 165, + 542, + 190 + ], + "type": "text", + "content": "[21] Guijin Son, Hyunwoo Ko, Hoyoung Lee, Yewon Kim, and Seunghyeok Hong. Llm-as-a-judge & reward model: What they can and cannot do. arXiv preprint arXiv:2409.11239, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 193, + 543, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 193, + 543, + 228 + ], + "spans": [ + { + "bbox": [ + 69, + 193, + 543, + 228 + ], + "type": "text", + "content": "[22] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 232, + 506, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 232, + 506, + 245 + ], + "spans": [ + { + "bbox": [ + 69, + 232, + 506, + 245 + ], + "type": "text", + "content": "[23] Rich Sutton. Verification, the key to ai. URL http://incompleteideas.net/IncIdeas/KeytoAI.html." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 249, + 544, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 249, + 544, + 283 + ], + "spans": [ + { + "bbox": [ + 69, + 249, + 544, + 283 + ], + "type": "text", + "content": "[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 287, + 541, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 287, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 69, + 287, + 541, + 312 + ], + "type": "text", + "content": "[25] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 316, + 541, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 316, + 541, + 339 + ], + "spans": [ + { + "bbox": [ + 69, + 316, + 541, + 339 + ], + "type": "text", + "content": "[26] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 343, + 541, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 343, + 541, + 378 + ], + "spans": [ + { + "bbox": [ + 69, + 343, + 541, + 378 + ], + "type": "text", + "content": "[27] Jiayi Ye, Yanbo Wang, Yue Huang, Dongping Chen, Qihui Zhang, Nuno Moniz, Tian Gao, Werner Geyer, Chao Huang, Pin-Yu Chen, et al. Justice or prejudice? quantifying biases in llm-as-a-judge. arXiv preprint arXiv:2410.02736, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 382, + 541, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 382, + 541, + 416 + ], + "spans": [ + { + "bbox": [ + 69, + 382, + 541, + 416 + ], + "type": "text", + "content": "[28] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 421, + 541, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 421, + 541, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 421, + 541, + 456 + ], + "type": "text", + "content": "[29] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 460, + 541, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 541, + 495 + ], + "type": "text", + "content": "[30] Wojciech Zaremba, Evgenia Nitishinskaya, Boaz Barak, Stephanie Lin, Sam Toyer, Yaodong Yu, Rachel Dias, Eric Wallace, Kai Xiao, Johannes Heidecke, et al. Trading inference-time compute for adversarial robustness. arXiv preprint arXiv:2501.18841, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 498, + 541, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 498, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 69, + 498, + 541, + 523 + ], + "type": "text", + "content": "[31] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 526, + 541, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 526, + 541, + 551 + ], + "spans": [ + { + "bbox": [ + 69, + 526, + 541, + 551 + ], + "type": "text", + "content": "[32] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 554, + 541, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 554, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 554, + 541, + 578 + ], + "type": "text", + "content": "[33] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 582, + 541, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 582, + 541, + 617 + ], + "spans": [ + { + "bbox": [ + 69, + 582, + 541, + 617 + ], + "type": "text", + "content": "[34] Jian Zhao, Runze Liu, Kaiyan Zhang, Zhimu Zhou, Junqi Gao, Dong Li, Jiafei Lyu, Zhouyi Qian, Biqing Qi, Xiu Li, et al. Genprm: Scaling test-time compute of process reward models via generative reasoning. arXiv preprint arXiv:2504.00891, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 621, + 541, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 621, + 541, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 621, + 541, + 645 + ], + "type": "text", + "content": "[35] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 763, + 311, + 772 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 763, + 311, + 772 + ], + "spans": [ + { + "bbox": [ + 300, + 763, + 311, + 772 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_content_list.json b/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f585f597733f1541db0fedbdc7af6a9814bc2b9c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_content_list.json @@ -0,0 +1,19098 @@ +[ + { + "type": "text", + "text": "VISUALPUZZLES: Decoupling Multimodal Reasoning Evaluation from Domain Knowledge", + "text_level": 1, + "bbox": [ + 171, + 98, + 828, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yueqi Song\\*, Tianyue Ou\\*, Yibo Kong†, Zecheng Li†, Graham Neubig, Xiang Yue {yueqis, tianyueo, gneubig, xyue2}@cs.cmu.edu", + "bbox": [ + 179, + 166, + 777, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Carnegie Mellon University", + "bbox": [ + 393, + 207, + 620, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://neulab.github.io/VisualPuzzles/", + "bbox": [ + 336, + 236, + 660, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 266, + 540, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Current multimodal benchmarks often conflate reasoning with domain-specific knowledge, making it difficult to isolate and evaluate general reasoning abilities in non-expert settings. To address this, we introduce VISUALPUZZLES, a benchmark that targets visual reasoning while deliberately minimizing reliance on specialized knowledge. VISUALPUZZLES consists of diverse questions spanning five categories: algorithmic, analogical, deductive, inductive, and spatial reasoning. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination. Experiments show that VISUALPUZZLES requires significantly less intensive domain-specific knowledge and more complex reasoning compared to benchmarks like MMMU, enabling us to better evaluate genuine multimodal reasoning. Evaluations show that state-of-the-art multimodal large language models consistently lag behind human performance on VISUALPUZZLES, and that strong performance on knowledge-intensive benchmarks does not necessarily translate to success on reasoning-focused, knowledge-light tasks. Additionally, reasoning enhancements such as scaling up inference compute (with \"thinking\" modes) yield inconsistent gains across models and task types, and we observe no clear correlation between model size and performance. We also found that models exhibit different reasoning and answering patterns on VISUALPUZZLES compared to benchmarks with heavier emphasis on knowledge. 
VISUALPUZZLES offers a clearer lens through which to evaluate reasoning capabilities beyond factual recall and domain knowledge.", + "bbox": [ + 228, + 301, + 769, + 625 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b49fbe7e0699eadfaf858d3fd9a5d59db1d54bb1ba3708e26d31d65d8c714c0e.jpg", + "image_caption": [ + "Figure 1: Model accuracy on VISUALPUZZLES compared to human performance percentiles. All evaluated models fall below the human 5th percentile (57.5%), highlighting the difficulty of VISUALPUZZLES. Interestingly, models with explicit \"thinking\" modes do not consistently outperform their base versions, suggesting that current reasoning strategies do not yet generalize well to VISUALPUZZLES's scenarios, even though these strategies have proven effective in existing reasoning tasks that often rely heavily on domain-specific knowledge." + ], + "image_footnote": [], + "bbox": [ + 173, + 646, + 823, + 790 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10342v3 [cs.CL] 30 Apr 2025", + "bbox": [ + 22, + 265, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal Contributions.", + "bbox": [ + 189, + 896, + 336, + 910 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Equal Contributions.", + "bbox": [ + 192, + 910, + 336, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/92bbb61a0dc500ca4cc6ff7ae9e4bf125395e05e5543c7fc02bd64ec2111821d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 99, + 390, + 223 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1ed8ee9704d40204c4a09619e1b5a1f5ac09cbf72757ce1849a071e4e1de96c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 101, + 607, + 222 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/78312d3960e121d282f214dae295f027c4d2a797ed5d6d36b898b32f29426c95.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 101, + 823, + 222 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d6fcb4af5124da5e912611d8cc6c3d5089127f9bcd51a49bbc48a62769953913.jpg", + "image_caption": [ + "Figure 2: Example VISUALPUZZLES instances within each reasoning category" + ], + "image_footnote": [], + "bbox": [ + 173, + 224, + 496, + 344 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bf86655e1940e3c1363d3ce9ef3b6abf9cd477dffada06d559f29bcafaa6f316.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 224, + 823, + 344 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 401, + 320, + 417 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Reasoning is a cornerstone of both human and artificial intelligence, enabling systems to solve problems, draw inferences, and make decisions from information. 
Recent advances in multimodal large language models (MLLMs) (OpenAI, 2024; Liu et al., 2023a; Li et al., 2024; Dubey et al., 2024; Qwen Team, 2025a; Yue et al., 2025) exhibit early signs of reasoning in tackling complex tasks such as answering expert-level visual questions (Yue et al., 2024a;b), interpreting scientific diagrams (Roberts et al., 2024), and solving challenging math word problems (Lu et al., 2023).", + "bbox": [ + 169, + 450, + 826, + 549 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Many of the tasks mentioned above are inherently knowledge-intensive; large amounts of knowledge in domains such as science or math are necessary to answer questions correctly (Yue et al., 2024a). However, in reality, reasoning does not necessitate knowledge. Even non-expert humans can successfully solve logic puzzles, spatial reasoning problems, and analogical tasks using general inferential skills, without requiring deep domain expertise. This raises an important question: Can we measure MLLMs's reasoning ability independently of measuring their acquisition of domain-specific knowledge? This question is particularly important with the recent rapid development of reasoning models in the textual domain (Jaech et al., 2024; DeepSeek-AI, 2025; Qwen Team, 2025b), and emerging application to the visual domain (Qwen Team, 2024).", + "bbox": [ + 169, + 555, + 828, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this question, we introduce VISUALPUZZLES, a multimodal benchmark explicitly crafted to assess reasoning capabilities independent of specialized knowledge. VISUALPUZZLES comprises 1,168 carefully curated puzzle-like questions that span five distinct categories of reasoning: algorithmic, analogical, deductive, inductive, and spatial, each annotated with varying difficulty levels. VISUALPUZZLES only requires basic common knowledge and the information presented in the question to solve problems, disentangling reasoning from domain-specific knowledge recall. Our experiments show that VISUALPUZZLES requires significantly fewer domain-specific knowledge concepts compared to benchmarks like MMMU, and models have sufficient knowledge required to solve VISUALPUZZLES questions, enabling us to better assess multimodal reasoning versus pretrained factual knowledge. While VISUALPUZZLES minimizes reliance on domain expertise, its reasoning complexity exceeds that of existing benchmarks: in VISUALPUZZLES, $82.1\\%$ of models' solution steps are logical reasoning steps, compared to $71.5\\%$ in MMMU. Additionally, no current MLLM surpasses even the 5th-percentile human performance, highlighting the benchmark's difficulty and the limitations of today's models in general-purpose visual reasoning.", + "bbox": [ + 169, + 700, + 828, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our experiments with VISUALPUZZLES reveal critical limitations in current MLLMs' multimodal reasoning ability by factoring out domain-specific knowledge requirements and only focusing on reasoning. Specifically, we uncover four key findings:", + "bbox": [ + 169, + 102, + 826, + 148 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Strong performance on knowledge-heavy benchmarks does not transfer well. 
Models that rank highly on MathVista and MMMU often experience substantial performance drops on VISUALPUZZLES, highlighting a disconnect between knowledge-rich and knowledge-light multimodal reasoning tasks.", + "- Humans outperform models on easy and medium tasks, while both degrade on harder ones. Human participants show strong and consistent performance on easy and medium-level questions across reasoning categories. In contrast, models struggle even on simpler tasks.", + "- Reasoning enhancements (e.g., long CoT and \"thinking\" mode) yield inconsistent gains. While explicit reasoning strategies help certain models tackle complex reasoning tasks, these techniques do not consistently improve performance across all model families and task types.", + "- Scaling model size does not ensure stronger reasoning. We observe no clear trend indicating that larger models outperform smaller ones on VISUALPUZZLES, suggesting that scaling up parameters alone is insufficient to improve domain-agnostic multimodal reasoning." + ], + "bbox": [ + 173, + 159, + 826, + 397 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 VISUALPUZZLES", + "text_level": 1, + "bbox": [ + 171, + 417, + 356, + 431 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Motivation and Design Principles of VISUALPUZZLES", + "text_level": 1, + "bbox": [ + 171, + 449, + 614, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing benchmarks often conflate multimodal reasoning with domain-specific knowledge, making it difficult to isolate and measure the pure reasoning capabilities of these models.", + "bbox": [ + 169, + 474, + 826, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "VISUALPUZZLES is designed to explicitly address this issue by providing a testbed focused on evaluating multimodal reasoning in isolation from specialized knowledge. Specifically, VISUALPUZZLES centers on puzzle-like questions that rely solely on the provided image, question text, and basic common-sense reasoning. The core design principle behind VISUALPUZZLES is to limit the need for external or pretrained domain knowledge. Figure 2 shows examples of VISUALPUZZLES within each reasoning category.", + "bbox": [ + 169, + 510, + 826, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Data Collection and Curation", + "text_level": 1, + "bbox": [ + 171, + 611, + 431, + 626 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We curated VISUALPUZZLES using a multi-stage pipeline. The process involved sourcing, adapting, and validating questions with an emphasis on reasoning quality and minimal reliance on specialized knowledge.", + "bbox": [ + 169, + 637, + 826, + 681 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Question Sourcing. We collected questions from three primary sources: (1) online resources and textbooks focused on logical, visual, and spatial puzzles, (2) synthesized items using images from large-scale vision datasets paired with text prompts, and (3) carefully repurposed items from existing multimodal reasoning benchmarks. Each source was selected to ensure a wide variety of reasoning challenges while avoiding trivial or fact-heavy questions. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination1. Other sources are listed in Appendix A.", + "bbox": [ + 169, + 686, + 826, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Format Adaptation. 
All collected items were adapted into a consistent multiple-choice format with four options, balancing between text-based and image-based answer choices. This modality balance allows us to better test models' abilities to perform reasoning across diverse formats.", + "bbox": [ + 169, + 792, + 826, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Validation. During curation, we applied strict filtering criteria to eliminate questions requiring advanced mathematical knowledge, specialized domain knowledge and facts. Questions were retained only if they could be solved using information present in the image,", + "bbox": [ + 169, + 854, + 828, + 901 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Chinese Civil Service Examination (Logic Test), 中国国家公务员考试行测(逻辑推理)", + "bbox": [ + 189, + 907, + 756, + 925 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the question prompt, and basic common sense. A multi-round validation process was conducted by human annotators, focusing on question clarity, solvability, and reasoning type classification.", + "bbox": [ + 169, + 103, + 823, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Attribute Annotation. Finally, each question was annotated with two key attributes:", + "bbox": [ + 171, + 152, + 784, + 169 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Reasoning Category: Each item was categorized as algorithmic, analogical, deductive, inductive, or spatial reasoning. These five categories were selected as they represent fundamental forms of reasoning widely discussed in literature (Liu et al., 2020; Lu et al., 2023; Yue et al., 2024a; Gao et al., 2023). At the same time, we aimed to balance comprehensiveness with conciseness, avoiding an overly fine-grained taxonomy that could dilute the benchmark's clarity and usability. This categorization ensures that VISUALPUZZLES covers a broad yet manageable set of reasoning skills relevant to multimodal LLM evaluation.", + "bbox": [ + 171, + 176, + 826, + 275 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Algorithmic Reasoning involves reasoning over algorithmic rules.", + "- Analogical Reasoning requires analyzing the relationships between a pair of entities.", + "- Deductive Reasoning involves logically drawing conclusions from known premises.", + "- Inductive Reasoning focuses on generalizing rules from observed patterns.", + "- Spatial Reasoning requires interpreting and manipulating spatial relationships." 
+ ], + "bbox": [ + 204, + 277, + 825, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Difficulty Level: Labeled as easy, medium, or hard, based on annotators' estimated cognitive load and time-to-solve metrics.", + "bbox": [ + 171, + 356, + 823, + 386 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This pipeline ensures that VISUALPUZZLES presents a diverse set of high-quality questions designed to challenge multimodal LLMs on their reasoning abilities without involving", + "bbox": [ + 169, + 393, + 823, + 424 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "pretrained domain knowledge.", + "bbox": [ + 171, + 424, + 398, + 438 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Dataset Statistics", + "text_level": 1, + "bbox": [ + 171, + 453, + 339, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VISUALPUZZLES comprises 1,168 multimodal reasoning puzzles. It is designed to provide a balanced distribution across different reasoning categories, difficulty levels, and option formats for comprehensive evaluation. The statistics of VISUALPUZZLES are shown in Table 1.", + "bbox": [ + 169, + 479, + 509, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Across the five reasoning types, we maintain a roughly even distribution, ensuring that no single reasoning style dominates the benchmark.", + "bbox": [ + 169, + 570, + 509, + 612 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/363631807b6384d0673ac736965367e311b38d5638c70e070bdfb37313c8d221.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table>
<tr><th>Category</th><th>Statistics</th></tr>
<tr><td>Total Questions</td><td>1168</td></tr>
<tr><td>- Algorithmic Reasoning</td><td>262</td></tr>
<tr><td>- Analogical Reasoning</td><td>211</td></tr>
<tr><td>- Deductive Reasoning</td><td>200</td></tr>
<tr><td>- Inductive Reasoning</td><td>209</td></tr>
<tr><td>- Spatial Reasoning</td><td>286</td></tr>
<tr><td>Easy/Medium/Hard</td><td>46%/39%/15%</td></tr>
<tr><td>Option Type (Image/Text)</td><td>57%/43%</td></tr>
<tr><td>AVG. Question Length</td><td>154.9</td></tr>
<tr><td>% Easy Words</td><td>54%</td></tr>
</table>
", + "bbox": [ + 522, + 431, + 821, + 583 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1: Statistics of VISUALPUZZLES", + "bbox": [ + 535, + 585, + 805, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similarly, we balanced the dataset across the three difficulty levels (easy, medium, hard) to capture a wide spectrum of cognitive demands. Approximately half of the answer choices in the dataset are image-based and the other half are text-based, enabling evaluation of models' abilities to reason across diverse query formats.", + "bbox": [ + 169, + 612, + 823, + 669 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In terms of language complexity, VISUALPUZZLES was constructed with an emphasis on accessibility. Most of the question text uses Basic English vocabulary2 to minimize the impact of linguistic complexity on reasoning performance, focusing the evaluation strictly on multimodal reasoning.", + "bbox": [ + 169, + 674, + 823, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Compared to prior benchmarks, VISUALPUZZLES is unique in that it explicitly minimizes domain-specific knowledge requirements while maintaining high reasoning complexity. We demonstrate these traits of VISUALPUZZLES in Section 5.", + "bbox": [ + 169, + 739, + 823, + 782 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Experiments and Results", + "text_level": 1, + "bbox": [ + 171, + 801, + 426, + 819 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 171, + 832, + 362, + 849 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We comprehensively evaluated the reasoning abilities of a variety of MLLMs on VISUALPUZZLES. Additionally, we performed human evaluations to better understand the gap between human and models' reasoning capabilities.", + "bbox": [ + 169, + 858, + 826, + 902 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "2https://en.wiktionary.org/wiki/Appendix:Basic_English_word_list", + "bbox": [ + 189, + 909, + 658, + 922 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We selected a diverse set of proprietary and open MLLMs to ensure broad coverage in terms of model architecture, training scale, and intended application domains. This diversity allows us to capture a wide spectrum of current approaches and capabilities in the field. We integrated VISUALPUZZLES into Lmms-eval (Li* et al., 2024).", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Proprietary Models. We evaluate several leading proprietary models that represent the current state of the art: (1) GPT-4o, o1, o3, and o4-mini (OpenAI, 2024; Jaech et al., 2024); (2) Gemini-1.5-Pro, Gemini-2.0-Flash, Gemini-2.0-Flash-Thinking, and Gemini-2.5-Pro (Gemini et al., 2023); (3) Claude-3.5-Sonnet and Claude-3.7-Sonnet (Anthropic, 2022). Among these, o1, o3, o4-mini are explicitly optimized for reasoning, while Gemini-2.0-Flash-Thinking and Claude-3.7-Sonnet incorporate dedicated modules for extensive step-by-step problem-solving.", + "bbox": [ + 169, + 166, + 826, + 265 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Open Models. 
We further evaluate widely used open MLLMs to gauge how open models compare against proprietary models: (1) LLaVA Series (Liu et al., 2023a; 2024a; Li et al., 2024): LLaVA-1.5 (7B/13B), LLaVA-1.6 (7B/13B/34B), and LLaVA-OV (0.5B/7B/72B); (2) Llama-3.2-Vision-Instruct (11B/90B) (Dubey et al., 2024); (3) Qwen-VL Series (Bai et al., 2024; Yang et al., 2024; Qwen Team, 2025a; 2024): including Qwen-VL, Qwen2-VL (2B/7B/72B-Instruct), Qwen2.5-VL (3B/7B/72B-Instruct), and QvQ-72B-Preview; (4) Cambrian (8B/13B) (Tong et al., 2024); (5) Pangea-7B (Yue et al., 2025).", + "bbox": [ + 169, + 270, + 826, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We apply both direct multiple-choice prompting and Chain-of-Thought (CoT) prompting to each model, following recent findings that CoT can significantly enhance model reasoning on complex multimodal tasks. For each model we report the best performance, whether achieved by direct multiple-choice prompting or CoT prompting.", + "bbox": [ + 169, + 375, + 823, + 434 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Human Performance. To establish a strong baseline for comparison, we conducted human evaluations with 70 college-level volunteers. Human performance provides a valuable upper-bound reference for assessing the current capabilities and limitations of multimodal reasoning models. While this serves as a benchmark for present-day systems, it is possible that future models could surpass this level of performance. Each participant was randomly assigned a subset of the puzzles and completed them under the same resource-constrained conditions as the models (i.e., without access to external tools or the internet). On average, participants completed each puzzle in 78 seconds, reflecting the typical cognitive load and time demands imposed by VISUALPUZZLES.", + "bbox": [ + 169, + 438, + 826, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Overall Results", + "text_level": 1, + "bbox": [ + 171, + 580, + 328, + 594 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2 and Figure 1 compare the performance of humans and a selected set of models.3 All evaluated models, even the proprietary ones, perform below the 4th percentile of human accuracy, underscoring the significant gap in multimodal reasoning abilities. These results reinforce our finding that, although models have made progress in multimodal understanding, there remains a substantial margin for improvement before they can match or surpass human performance on multimodal reasoning.", + "bbox": [ + 169, + 606, + 823, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This pattern holds across categories as well. In Table 2, top human participants (95th percentile) exhibit near-perfect accuracy on multiple reasoning categories, while model performance remains substantially lower, even lower than the worst human performance (5th percentile). 
These results emphasize the need for continued innovation in model architectures and training paradigms if we aim to close the gap between model and human intelligence on complex multimodal reasoning.", + "bbox": [ + 169, + 696, + 823, + 782 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Disentangling Reasoning from Domain Knowledge", + "text_level": 1, + "bbox": [ + 169, + 800, + 668, + 819 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Knowledge Intensity of VISUALPUZZLES", + "text_level": 1, + "bbox": [ + 169, + 832, + 519, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Is VISUALPUZZLES less knowledge-intensive than existing reasoning benchmarks? This question is central to our goal of disentangling reasoning ability from domain-specific", + "bbox": [ + 169, + 858, + 823, + 888 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3Full results for every model discussed in Section 3 are provided in Appendix D, including separate performance outcomes for both direct multiple-choice and CoT prompting.", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/fbc5dc8093e16ec24a3470e9d0985c3a334bafdc964c8c86c331e8622178f9d0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table>
<tr><th>Model</th><th>Algorithms</th><th>Analogical</th><th>Deductive</th><th>Inductive</th><th>Spatial</th><th>Overall</th></tr>
<tr><td>Random Choice</td><td>25.0</td><td>25.0</td><td>25.0</td><td>25.0</td><td>25.0</td><td>25.0</td></tr>
<tr><td>Human (95th Percentile)</td><td>100.0</td><td>100.0</td><td>100.0</td><td>81.6</td><td>100.0</td><td>89.3</td></tr>
<tr><td>Human (50th Percentile)</td><td>88.0</td><td>66.0</td><td>80.0</td><td>50.0</td><td>90.0</td><td>75.0</td></tr>
<tr><td>Human (5th Percentile)</td><td>68.1</td><td>25.0</td><td>37.0</td><td>0.0</td><td>59.1</td><td>57.5</td></tr>
<tr><td colspan="7">Proprietary Models</td></tr>
<tr><td>GPT-4o</td><td>49.2</td><td>58.3</td><td>49.0</td><td>27.3</td><td>26.2</td><td>41.3</td></tr>
<tr><td>o1</td><td>63.7</td><td>68.3</td><td>67.5</td><td>29.2</td><td>34.3</td><td>51.8</td></tr>
<tr><td>o3</td><td>64.5</td><td>68.3</td><td>69.5</td><td>27.3</td><td>42.7</td><td>54.0</td></tr>
<tr><td>o4-mini</td><td>65.3</td><td>68.7</td><td>75.5</td><td>33.0</td><td>45.5</td><td>57.0</td></tr>
<tr><td>Gemini-2.0-flash</td><td>55.3</td><td>58.8</td><td>57.0</td><td>24.4</td><td>31.8</td><td>45.0</td></tr>
<tr><td>Gemini-2.0-flash-thinking</td><td>46.6</td><td>70.1</td><td>49.0</td><td>24.9</td><td>25.5</td><td>42.2</td></tr>
<tr><td>Gemini-2.5-pro</td><td>60.0</td><td>64.0</td><td>60.0</td><td>29.7</td><td>36.4</td><td>49.5</td></tr>
<tr><td>Claude-3.7-Sonnet</td><td>64.5</td><td>48.3</td><td>65.0</td><td>26.8</td><td>37.4</td><td>48.3</td></tr>
<tr><td>Claude-3.7-Sonnet-Thinking</td><td>67.2</td><td>44.1</td><td>61.5</td><td>31.1</td><td>37.1</td><td>48.2</td></tr>
<tr><td colspan="7">Open Models (Qwen-Based)</td></tr>
<tr><td>LLaVA-OV-7B</td><td>27.5</td><td>28.0</td><td>40.5</td><td>24.4</td><td>28.0</td><td>29.4</td></tr>
<tr><td>Pangea-7B</td><td>32.4</td><td>23.7</td><td>38.5</td><td>28.7</td><td>32.5</td><td>31.3</td></tr>
<tr><td>Qwen2.5-VL-7B-Instruct</td><td>38.2</td><td>23.7</td><td>51.5</td><td>24.9</td><td>31.1</td><td>33.7</td></tr>
<tr><td>LLaVA-OV-72B</td><td>34.7</td><td>26.5</td><td>37.0</td><td>27.3</td><td>28.7</td><td>30.8</td></tr>
<tr><td>QvQ-72B-Preview</td><td>44.8</td><td>43.6</td><td>44.0</td><td>26.8</td><td>30.8</td><td>37.8</td></tr>
<tr><td>Qwen2.5-VL-72B-Instruct</td><td>53.4</td><td>46.9</td><td>58.0</td><td>25.8</td><td>29.5</td><td>42.3</td></tr>
<tr><td colspan="7">Open Models (Llama-Based)</td></tr>
<tr><td>Cambrian-8B</td><td>31.3</td><td>24.2</td><td>36.0</td><td>24.0</td><td>29.0</td><td>28.9</td></tr>
<tr><td>Llama-3.2-11B-Vision-Instruct</td><td>31.0</td><td>30.8</td><td>39.0</td><td>21.1</td><td>26.2</td><td>29.4</td></tr>
<tr><td>Llama-3.2-90B-Vision-Instruct</td><td>45.0</td><td>23.2</td><td>43.0</td><td>26.3</td><td>31.5</td><td>34.1</td></tr>
</table>
", + "bbox": [ + 171, + 99, + 828, + 462 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "knowledge. Many current benchmarks blur this line, making it difficult to assess general reasoning in non-expert settings. VISUALPUZZLES was designed to target visual reasoning skills while deliberately minimizing reliance on specialized knowledge.", + "bbox": [ + 169, + 540, + 823, + 583 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To test whether VISUALPUZZLES achieves this goal, we prompted GPT-4o to generate \"knowledge concept checklists\" for 50 randomly selected questions from a widely-used knowledge-intensive reasoning dataset MMMU and 50 from VISUALPUZZLES. We manually verified each question as discussed in subsection E.3. Each checklist comprises knowledge-specific questions intended to assess whether a model possesses the background information required to solve the original problem. For example, if a question depends on understanding two distinct physics laws, its checklist would include a question to explain each. The number of checklist items per instance serves as a proxy for knowledge intensity.", + "bbox": [ + 169, + 589, + 823, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We found that MMMU problems resulted in significantly more checklist items on average (3.9) compared to VISUALPUZZLES (1.1), as shown in Table 3. This supports the hypothesis that VISUALPUZZLES is substantially less reliant on domain knowledge. As a result, performance on VISUALPUZZLES more directly reflects a model's ability to reason over visual and textual content, offering", + "bbox": [ + 169, + 707, + 531, + 819 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5abe78030b7037208b41af28065a5b555d55d83ad1baf370ba0a12ff565461b1.jpg", + "table_caption": [ + "Table 2: Performance (%) comparison of humans and selected models on VISUALPUZZLES. We report the best performance resulting from direct multiple-choice prompting and CoT prompting for each method. We highlighted all the reasoning models." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><th>Benchmark</th><th># Knowledge Qs.</th></tr>
<tr><td>MMMU</td><td>3.9</td></tr>
<tr><td>VISUALPUZZLES</td><td>1.1</td></tr>
</table>
", + "bbox": [ + 544, + 705, + 821, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3: AVG. number of knowledge concept questions generated per instance on MMMU vs. VISUALPUZZLES.", + "bbox": [ + 539, + 766, + 823, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "a clearer signal of progress in multimodal reasoning. Full prompt examples and further discussion are provided in Appendix E.", + "bbox": [ + 169, + 819, + 823, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Do models already possess the knowledge required to solve VISUALPUZZLES? To explore this, we measured models' knowledge accuracy—their ability to answer the knowledge checklist questions correctly—on both benchmarks. This metric reflects how much of the required knowledge is already known by the model, independent of reasoning. We found a stark contrast: while many models exceed $90\\%$ knowledge accuracy on VISUALPUZZLES,", + "bbox": [ + 169, + 854, + 825, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/422bc896f6eb461b560d28a6bcdb7b46675ce08dc157a69ee3ea72239fe20f5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 104, + 517, + 263 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dd2c3eda1142f57bd67e35d01e2305f06caa0abe215815db4166a95aaa2a731c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 103, + 821, + 263 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8ac1191f2d067a68d549fd32aad8c30f764923b7f3097b435600a39acd0bbc64.jpg", + "image_caption": [ + "Figure 3: Scatter plots with trend lines of the relationship between accuracy and model size (top) and the relationship between reasoning and knowledge accuracy (bottom) on MMMU and VISUALPUZZLES. The dots' sizes represent relative model sizes. The correlation between reasoning accuracy and knowledge accuracy is higher on MMMU (0.8) than on VISUALPUZZLES (0.4)." + ], + "image_footnote": [], + "bbox": [ + 178, + 273, + 514, + 431 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/085189f854c325765c80bfc7f114017ecfa432448aa0ef26f6e006d72565f3fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 273, + 816, + 431 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "most score below $60\\%$ on MMMU, with smaller models frequently dropping under $50\\%$ . Only the largest models approach $80\\%$ accuracy on MMMU, underscoring its heavier reliance on domain-specific knowledge.", + "bbox": [ + 169, + 542, + 826, + 585 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Does scaling up model size improve performance? We also plot reasoning accuracy (i.e., overall performance on the benchmark) in Figure 3, revealing some interesting trends:", + "bbox": [ + 169, + 590, + 825, + 621 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MMMU. Larger models tend to have higher knowledge accuracy, and this often translates into higher overall benchmark performance. This aligns with MMMU's reliance on domain-specific understanding; models with more parameters and training data are better at recalling relevant factual knowledge, thus improving their overall performance.", + "- VISUALPUZZLES. 
Although many models achieve near- $100\\%$ knowledge accuracy on VISUALPUZZLES, we observe no clear increase in both knowledge and reasoning accuracy as model size grows. In contrast to MMMU, simply scaling number of parameters does not guarantee better performance on VISUALPUZZLES, implying that further gains on VISUALPUZZLES must stem from improvements in models' reasoning abilities rather than reliance on extensive knowledge." + ], + "bbox": [ + 171, + 630, + 823, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "What is the relationship between knowledge and reasoning? Figure 3 shows two scatter plots with trend lines that measure how knowledge accuracy correlates with reasoning accuracy across different open models, where the relative sizes of the dots represent the sizes of the models. On MMMU (left), there is a strong positive correlation (0.8), suggesting that a model possessing more knowledge strongly correlates better reasoning performance. In contrast, VISUALPUZZLES (right) exhibits a more modest correlation (0.4). Although there is still an upward trend, gains in knowledge accuracy lead to smaller improvements in reasoning accuracy. This discrepancy implies that while overcoming knowledge gaps is central to reasoning success on MMMU, VISUALPUZZLES tasks demand more nuanced inference steps that depends less on domain knowledge.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Overall, these findings reinforce that VISUALPUZZLES's comparatively lower knowledge requirements are readily met by both proprietary and open models. By contrast, MMMU poses a greater challenge to smaller models in terms of knowledge, for which scaling in size clearly benefits knowledge-intensive tasks. However, on VISUALPUZZLES, larger model size alone is not a decisive factor, which might imply that genuine multimodal reasoning depends on more than just number of parameters or pre-trained knowledge.", + "bbox": [ + 169, + 102, + 826, + 189 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 Reasoning Complexity of VISUALPUZZLES", + "text_level": 1, + "bbox": [ + 169, + 205, + 532, + 220 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Do questions in VISUALPUZZLES require more complex reasoning than those in existing benchmarks like MMMU?", + "text_level": 1, + "bbox": [ + 169, + 229, + 823, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Besides observing that models generally achieve lower accuracy on VISUALPUZZLES compared to MMMU, we further investigated whether this gap stems from increased reasoning complexity. To do so, we measured the proportion of reasoning steps required to solve each question. We began", + "bbox": [ + 169, + 266, + 529, + 349 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "by gathering detailed, step-by-step solutions from the models for each question, which are manually verified for completeness. Then we classified if each step is a logical reasoning step with the help of LLM. We show the result in Table 4. On average, logical reasoning steps take up $14.8\\%$ more total steps in solving VISUALPUZZLES questions compared to those of MMMU (82.1% v.s. 71.5%). 
This analysis is based on GPT-4o and Gemini-2.0-Flash across 200 randomly sampled questions per benchmark. These results suggest that VISUALPUZZLES demand more extensive reasoning, aligning with its goal of evaluating deeper multimodal reasoning beyond factual recall. Prompt example is shown in Appendix F.", + "bbox": [ + 169, + 349, + 826, + 462 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/574525c730940cd3774fe74971f3aadf225b9b9840f7ef41a6416ea224eee334.jpg", + "table_caption": [], + "table_footnote": [ + "Table 4: Percentage of logical reasoning steps in solving benchmark questions." + ], + "table_body": "
<table>
<tr><th>Model</th><th>MMMU</th><th>VISUALPUZZLES</th></tr>
<tr><td>GPT-4o</td><td>75.1%</td><td>87.0%</td></tr>
<tr><td>Gemini-2.0-Flash</td><td>67.9%</td><td>77.3%</td></tr>
</table>
", + "bbox": [ + 544, + 263, + 821, + 308 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Do Reasoning Models Perform Better than Their Baselines?", + "text_level": 1, + "bbox": [ + 169, + 477, + 656, + 493 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/097011d809433a79022988b41408051bc1ba5980c55cdc75b274a14638a0f3d4.jpg", + "image_caption": [ + "Figure 4: Comparison of accuracy and average number of total completion tokens of reasoning models and their general counterparts on VISUALPUZZLES. We didn't include Gemini-2.0-Flash models here because Gemini-2.0-Flash-Thinking does not reveal the number of reasoning tokens of responses. The accuracies of Gemini-2.0-Flash and Gemini-2.0-Flash-Thinking is $45.0\\%$ and $42.2\\%$ respectively. Despite much higher number of completion tokens, reasoning models do not often achieve better performance on VISUALPUZZLES." + ], + "image_footnote": [], + "bbox": [ + 176, + 512, + 390, + 621 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9d2390fe3c1b50f4c9e0c3fc610d172eb6b346f542c089a5ccbaa752243e3aff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 512, + 606, + 621 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b3f117d6f51d23f162333ef36f7641cdcf69a8d721c3509ea76381b786e355f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 512, + 821, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Recent reasoning models often scale up inference compute by generating longer chains of thought (CoTs) to enhance reasoning ability. To assess the effectiveness of this strategy on VISUALPUZZLES, we compare several reasoning models with their non-reasoning counterparts in Figure 4. The reasoning model o1 outperforms GPT-4o overall. However, structured \"thinking\" modes, despite much higher number of completion tokens, show no consistent benefit. Similarity of output further reveals that the thinking mode primarily increases vocabulary without meaningfully altering the underlying reasoning process, as illustrated in Figure 13.", + "bbox": [ + 169, + 739, + 826, + 853 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 Are Branching and Revalidation Reasoning Patterns Effective on VISUALPUZZLES?", + "text_level": 1, + "bbox": [ + 169, + 869, + 825, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As discussed in Section 4.3, reasoning-enabled models do not consistently outperform their non-reasoning counterparts on VISUALPUZZLES. To better understand this discrepancy, we", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b536792942d0b01abaf60cfa156d36a936bb060f4df1b04a5055134b1d998792.jpg", + "image_caption": [ + "Figure 5: Comparison of Reasoning Pattern of Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Left figure compares the accuracy of Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Middle figure shows frequency of each pattern. Right figure shows correlation of the patterns with accuracy on the benchmarks." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 101, + 419, + 199 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/523a7fd5906283d19e4b6b98b68b5d07b68a0003d35bf321828d973d89158391.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 101, + 666, + 200 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1662aa990cd75de3f0e4e15080707ff2d3e5d543c8a719718bfdc6202b92da36.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 678, + 103, + 823, + 200 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "examine Claude-3.7-Sonnet-Thinking's reasoning behaviors present in long CoTs, specifically, branching and re-validation, which are known to play important roles in enhancing reasoning performance4.", + "bbox": [ + 169, + 279, + 826, + 325 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in Figure 5, our analysis reveals a striking contrast between benchmarks. On MMMU, both branching and re-validation correlate positively with model accuracy. These strategies help models explore alternative reasoning paths and revisit earlier steps, aiding in the retrieval of relevant factual knowledge, an essential component for solving MMMU's knowledge-intensive questions. An illustrative example is provided in Appendix E.", + "bbox": [ + 169, + 330, + 823, + 402 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Surprisingly, on VISUALPUZZLES, these reasoning behaviors are more frequent, yet less predictive of success. Despite their increased presence in long-form responses, we observe no significant correlation between these strategies and task accuracy. This suggests that models may be using branching and re-validation in ways that do not meaningfully contribute to solving the problem.", + "bbox": [ + 169, + 407, + 486, + 547 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 6 highlights this with an example from Claude-3.7-Sonnet-Thinking, where the model applies branching on a VISUALPUZZLES puzzle. However, the additional reasoning paths remain shallow and fail to engage with the core challenge—understanding the spatial arrangement of chairs in the image. The full response is included in Appendix E.", + "bbox": [ + 169, + 554, + 486, + 681 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Analysis", + "text_level": 1, + "bbox": [ + 171, + 700, + 285, + 718 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question: Given a pair of chairs arranged at a specific angle, select the option that matches the angle of the chairs in the question. (Answer: C)", + "bbox": [ + 500, + 407, + 815, + 446 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d90d53e47cf6e5230fb7578325d85a11aeb477745ec81d8999ae9db553e82ac7.jpg", + "image_caption": [ + "Figure 6: An example of Claude-3.7-Sonnet-Thinking utilizing branching to solve a VISUALPUZZLES puzzle." + ], + "image_footnote": [], + "bbox": [ + 500, + 448, + 818, + 516 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet (With Thinking) \nLet me examine the chairs in the question: \n- The chairs are positioned 180 degrees apart. \n- They are shown in profile view. \nOption A: ...; Option B: ...; Option C: ...; Option D: ... Any of them is correct from an angular perspective.. \nHowever, looking more carefully at the exact orientation: \n- The original chairs are shown in perfect profile view \n- Option A shows chairs in perfect profile view, .... .. 
\nI'll go with Option A as it presents the cleanest profile view that matches the arrangement in the question.", + "bbox": [ + 501, + 518, + 818, + 667 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.1 Do Models Approach VISUALPUZZLES Questions Differently?", + "text_level": 1, + "bbox": [ + 169, + 732, + 679, + 750 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e39d29d3f3dbe47128d3e18cab78b68ccdeb0175590f89a101b716c36c4cfca3.jpg", + "table_caption": [ + "Table 5 shows the statistics of Claude-3.7-Sonnet-Thinking's answering strategy. We observe a clear divergence in answering strategies between MMMU and VISUALPUZZLES. On MMMU, the" + ], + "table_footnote": [], + "table_body": "
<table>
<tr><th>Benchmark</th><th>Answer-First</th><th>Option-First</th></tr>
<tr><td>MMMU</td><td>29.3%</td><td>70.7%</td></tr>
<tr><td>VISUALPUZZLES (Image Options)</td><td>72.5%</td><td>27.5%</td></tr>
<tr><td>VISUALPUZZLES (Text Options)</td><td>98.3%</td><td>1.7%</td></tr>
</table>
", + "bbox": [ + 452, + 756, + 821, + 816 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 5: Answering Strategy", + "bbox": [ + 531, + 818, + 740, + 834 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "model tend to follow an option-driven approach—using the provided choices early to eliminate unlikely answers and select the most relevant one, often without explicitly solving the problem. In contrast, models more frequently adopt an answer-first strategy on VISUALPUZZLES, attempting to solve the question independently before comparing", + "bbox": [ + 169, + 842, + 823, + 900 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "4We examined Claude-3.7-Sonnet-Thinking as it explicitly provides thinking output.", + "bbox": [ + 189, + 909, + 740, + 925 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "the result to the answer choices. This pattern holds across both textual and image-based options, though the option-first approach appears slightly more often (around $30\\%$ ) for image-based tasks—likely due to the added complexity of visual comparison.", + "bbox": [ + 169, + 102, + 823, + 148 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.2 Does model performance transfer between reasoning categories?", + "text_level": 1, + "bbox": [ + 171, + 169, + 692, + 186 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/97e95fdf454706c7da694622cce81f46164cd08513842a89a130211e7210be6f.jpg", + "image_caption": [ + "Figure 7: Correlation Heatmap among reasoning categories for models (averaged across all models we evaluated)." + ], + "image_footnote": [], + "bbox": [ + 178, + 196, + 450, + 388 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 7 presents a correlation heatmap illustrating the relationships among the five reasoning categories in VISUALPUZZLES. We report model correlations averaged across all models in Table 2. For humans, each reasoning category likely engages different cognitive or mental processes (Goel & Dolan, 2004; Green et al., 2010; Bright & Feeney, 2014; Babcock & Vallesi, 2015), so performance in one category might not transfer to performance in another. However, the correlation heatmap of the models tells a different story. We observe notably strong correlations across reasoning categories, with values ranging from 0.11 to as high as 0.94. In particular, algorithmic and deductive reasoning show high correlation (0.94), and other pairs such as algorithmic-analogical and deductive-analogical also exhibit strong associations. This suggests", + "bbox": [ + 477, + 196, + 826, + 448 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "that model performance tends to generalize across categories. However, this generalization may not reflect true reasoning abilities. 
Instead, the high correlations could indicate that models are leveraging shared surface-level patterns or shortcut strategies that happen to work across multiple structurally different categories, unlike humans, who may rely on distinct cognitive processes.", + "bbox": [ + 169, + 448, + 823, + 518 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.3 Error Analysis", + "text_level": 1, + "bbox": [ + 171, + 540, + 320, + 556 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 8 shows a pie chart illustrating the distribution of error categories of 100 instances generated by Claude-3.7-Sonnet-Thinking on VISUALPUZZLES, revealing that reasoning errors dominate at $56\\%$ , reinforcing the fact that reasoning is greatest challenge to models in VISUALPUZZLES. Perceptual errors $(21\\%)$ and spatial / orientation errors $(17\\%)$ also constitute substantial portions of failures, reflecting difficulties in interpreting visual elements and understanding spatial relationships. These three categories together account for $94\\%$ of mistakes, emphasizing a need for multimodal models with stronger reasoning capabilities with more robust perception and spatial understanding. Textual and visual understanding errors $(4\\%)$ and reject-to-answer cases $(2\\%)$ are relatively rare. Appendix I shows samples of error and correct cases of each reasoning and difficulty category.", + "bbox": [ + 169, + 568, + 583, + 792 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/9be1b71d51b868e947ea438ebbee99b5295830e1be6a90035757235c7e3f403e.jpg", + "image_caption": [ + "Figure 8: Error Distribution of Claude-3.7-Sonnet-Thinking" + ], + "image_footnote": [], + "bbox": [ + 594, + 537, + 823, + 752 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6 Related Work", + "text_level": 1, + "bbox": [ + 171, + 818, + 328, + 834 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Multimodal Language Models (MLLMs), particularly vision language models have experienced significant improvements recently. Large scale vision language models (Gemini et al., 2023); (OpenAI, 2024); (Anthropic, 2022); including open weight ones (Li et al., 2024); (Yue et al., 2025); (Liu et al., 2024b); (Tong et al., 2024); (Dubey et al., 2024) are capable of utilizing both image and text inputs to solve challenging questions.", + "bbox": [ + 169, + 853, + 826, + 926 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 344, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Multimodal reasoning models, models that specialize in complex reasoning, further push the boundary of MLLMs' capabilities. Large scale multimodal reasoning models such as QVQ (Qwen Team, 2024), Claude-3.7-Sonnet-thinking (Anthropic, 2022), o1 (Jaech et al., 2024), Gemini-2.0-flash-thinking (Gemini et al., 2023) excel in reasoning heavy tasks such as coding and solving math problems.", + "bbox": [ + 169, + 103, + 826, + 175 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Multimodal Reasoning Benchmarks. There exists a number of multimodal benchmarks that test on both the models' world knowledge and reasoning abilities. 
These benchmarks (Yue et al., 2024a); (Marino et al., 2019); (Liu et al., 2023b); (Yue et al., 2024b); (Authors, 2025) emphasize on the multimodal ability of models as a whole, without further separation of knowledge and reasoning.", + "bbox": [ + 169, + 180, + 826, + 253 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Recently, more multimodal benchmarks have placed emphasis on multimodal logical reasoning abilities. Many of them (Lu et al., 2023); (Wang et al., 2024b) focus primarily on mathematic problems, testing on both mathematical knowledge and reasoning. Some others cover on more general logical reasoning problems (Cherian et al., 2022b); (Gao et al., 2023), testing on both models' knowledge and reasoning in different domains.", + "bbox": [ + 169, + 257, + 828, + 329 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "7 Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 171, + 354, + 465, + 371 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We presented VISUALPUZZLES, a novel multimodal benchmark carefully designed to minimize the impact of domain-specific knowledge and isolate models' core reasoning capabilities. Our results show that while proprietary and large-scale open models achieve relatively higher performance, they still fall short of human-level reasoning—especially on more complex tasks such as analogical and inductive reasoning. Moreover, we observe that strong performance on knowledge-intensive benchmarks like MathVista and MMMU does not necessarily translate into high accuracy on VISUALPUZZLES, underscoring the distinct challenge of knowledge-light reasoning tasks.", + "bbox": [ + 169, + 388, + 828, + 503 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "These findings suggest that purely scaling model size and knowledge resources may not suffice for robust multimodal reasoning skills; rather, methods that promote structured reasoning, such as explicit thinking modes or recursive reasoning steps, can offer substantial improvements, particularly for hard questions. Future research can explore new training strategies, specialized architectures, or model interpretations tailored to reduce reliance on memorized facts and enhance logical inference. Extending VISUALPUZZLES to include additional types of multi-image reasoning or temporally dynamic visual information may further stress-test models' core inference abilities. By disentangling domain knowledge from multimodal reasoning, we hope VISUALPUZZLES will serve as a valuable tool for developing and evaluating next-generation MLLMs that excel at genuinely understanding and reasoning about the world without depending heavily on specialized factual knowledge.", + "bbox": [ + 169, + 508, + 828, + 662 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "8 Limitations", + "text_level": 1, + "bbox": [ + 171, + 688, + 310, + 704 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Disentangling Knowledge Despite our best efforts to isolate domain-specific knowledge from the evaluation of multimodal reasoning, VISUALPUZZLES is still not entirely free of knowledge dependencies. 
Basic familiarity with everyday objects or common scenarios is still required; complete knowledge free evaluation remains an ideal rather than a practical reality.", + "bbox": [ + 169, + 724, + 825, + 797 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Real World Application VISUALPUZZLES emphasizes puzzle-like questions that may not reflect the full diversity of real-world scenarios, limiting generalizability to more specialized domains.", + "bbox": [ + 169, + 816, + 825, + 859 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Question Format VISUALPUZZLES focuses on multiple-choice questions, which may not capture the breadth of open-ended reasoning tasks where models must generate complex textual or visual outputs.", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Future work can address these limitations by including more varied question formats, broader domains, and more granular analyses of a model's knowledge versus its multimodal reasoning abilities.", + "bbox": [ + 169, + 103, + 826, + 148 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "9 Ethical Statement", + "text_level": 1, + "bbox": [ + 171, + 167, + 362, + 184 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This paper uses samples extracted from existing quiz sources for scholarly analysis and testing purposes, in accordance to US fair use law and standard practice. These data are neither intended for, nor capable of, substituting for the original works; thus, we believe their inclusion does not diminish the market value or utility of the source materials. A complete list of references for the data sources is attached in Appendix A.", + "bbox": [ + 169, + 200, + 823, + 272 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 171, + 291, + 354, + 309 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This project was supported in part by a grant from DSTA Singapore and the Carnegie Bosch Institute. The authors would like to thank CMU NeuLab colleagues for their constructive comments. The authors would also like to thank all volunteers who participated in the human evaluation.", + "bbox": [ + 169, + 324, + 823, + 380 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 402, + 274, + 417 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "https://www.anthropic.com/index/introducing-claudeAnthropic. Claude, 2022. URL https://www.anthropic.com/index/introducing-claude.", + "Humanity's Last Exam's Authors. Humanity's last exam. ArXiv, abs/2501.14249, 2025. URL https://api-semanticscholar.org/CorpusID:275906652.", + "Laura Babcock and Antonino Vallesi. The interaction of process and domain in prefrontal cortex during inductive reasoning. Neuropsychologia, 67:91-99, 2015.", + "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond, 2024. URL https://openreview.net/forum?id=qrGjFJV13m.", + "Yonatan Bitton, Ron Yosef, Eliyahu Strugo, Dafna Shahaf, Roy Schwartz, and Gabriel Stanovsky. 
Vasr: Visual analogies of situation recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 241-249, 2023.", + "Aimée K Bright and Aidan Feeney. Causal knowledge and the development of inductive reasoning. Journal of Experimental Child Psychology, 122:48-61, 2014.", + "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin Smith, and Joshua B Tenenbaum. Are deep neural networks smarter than second graders? arXiv preprint arXiv:2212.09993, 2022a.", + "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin A. Smith, and Joshua B. Tenenbaum. Are deep neural networks smarter than second graders? 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10834-10844, 2022b. URL https://api-semanticscholar.org/CorpusID:254877678.", + "DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. ArXiv preprint, abs/2407.21783, 2024. URL https://arxiv.org/abs/2407.21783." + ], + "bbox": [ + 173, + 426, + 826, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 344, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jingying Gao, Qi Wu, Alan Blair, and Maurice Pagnucco. Lora: A logical reasoning augmented dataset for visual question answering. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023.", + "Gemini, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. ArXiv preprint, abs/2312.11805, 2023. URL https://arxiv.org/abs/2312.11805.", + "Vinod Goel and Raymond J Dolan. Differential involvement of left prefrontal cortex in inductive and deductive reasoning. Cognition, 93(3):B109-B121, 2004.", + "Adam E Green, David JM Kraemer, Jonathan A Fugelsang, Jeremy R Gray, and Kevin N Dunbar. Connecting long distance: semantic distance in analogical reasoning modulates frontopolar cortex activity. Cerebral cortex, 20(1):70-76, 2010.", + "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "Bo Li*, Peiyuan Zhang*, Kaicheng Zhang*, Fanyi Pu*, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimoal models, March 2024. URL https://github.com/EvolvingLMMs-Lab/lmms-eval.", + "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024.", + "Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning, 2023a. URL https://arxiv.org/abs/2310.03744.", + "Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024a. 
URL https://arxiv.org/pdf/2401.13601.", + "Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning, 2020.", + "Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding. ArXiv, abs/2410.13824, 2024b. URL https://api(semanticscholar.org/ CorpusID:273403951.", + "Yuanzhan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, Kai Chen, and Dahua Lin. Mmbench: Is your multi-modal model an all-around player? In European Conference on Computer Vision, 2023b. URL https://api_semanticscholar.org/CorpusID:259837088.", + "Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023.", + "Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3190-3199, 2019. URL https://api_semanticscholar.org/CorpusID:173991173.", + "OpenAI. Hello gpt4-o. https://openai.com/index/hello-gpt-4o/, 2024. URL https://openai.com/index/hello-gpt-4o/.", + "Qwen Team. Qvq: To see the world with wisdom, December 2024. URL https://qwenlm.github.io/blog/qvq-72b-preview/." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Qwen Team. Qwen2.5-vl, January 2025a. URL https://qwenlm.github.io/blog/qwen2.5-v1/.", + "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025b. URL https://qwenlm.github.io/blog/qwq-32b/.", + "Jonathan Roberts, Kai Han, Neil Houlsby, and Samuel Albanie. SciFIBench: Benchmarking large multimodal models for scientific figure interpretation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=HcLFNuQwy5.", + "Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. ArXiv preprint, abs/2406.16860, 2024. URL https://arxiv.org/abs/2406.16860.", + "Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2024a.", + "Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset, 2024b. URL https:// arxiv.org/abs/2402.14804.", + "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. ArXiv preprint, abs/2407.10671, 2024. 
URL https://arxiv.org/abs/2407.10671.", + "Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Renliang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024a.", + "Xiang Yue, Tianyu Zheng, Yuansheng Ni, Yubo Wang, Kai Zhang, Shengbang Tong, Yuxuan Sun, Botao Yu, Ge Zhang, Huan Sun, et al. Mmmu-pro: A more robust multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2409.02813, 2024b.", + "Xiang Yue, Yueqi Song, Akari Asai, Simran Khanuja, Anjali Kantharuban, Seungone Kim, Jean de Dieu Nyandwi, Lintang Sutawika, Sathyanarayanan Ramamoorthy, and Graham Neubig. Pangea: A fully open multilingual multimodal LLM for 39 languages. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=a3g214yEys." + ], + "bbox": [ + 171, + 102, + 828, + 666 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 344, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table of Contents in Appendix", + "text_level": 1, + "bbox": [ + 171, + 99, + 506, + 122 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A VISUALPUZZLES Statistics 16", + "text_level": 1, + "bbox": [ + 174, + 141, + 825, + 156 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A.1 Breakdown of Statistics of VISUALPUZZLES 16", + "A.2 Data Sources 16" + ], + "bbox": [ + 196, + 162, + 825, + 199 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Model Evaluation Setup 16", + "text_level": 1, + "bbox": [ + 173, + 218, + 825, + 234 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C Human Annotation Setup 16", + "text_level": 1, + "bbox": [ + 173, + 252, + 825, + 268 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "C.1 Difficulty Labeling 16", + "C.2 Reasoning Category Labeling 17" + ], + "bbox": [ + 196, + 273, + 825, + 311 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D Full Results 17", + "text_level": 1, + "bbox": [ + 173, + 328, + 825, + 343 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "D.1 Full Results w/ CoT 17", + "D.2 Full Results w/n CoT 17" + ], + "bbox": [ + 197, + 349, + 825, + 386 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E Knowledge Checklist 17", + "text_level": 1, + "bbox": [ + 173, + 405, + 825, + 421 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "E.1 Knowledge Checklist Generation 17", + "E.2 Example Knowledge Checklist Question 20", + "E.3 Knowledge Checklist Human Annotation 20" + ], + "bbox": [ + 197, + 426, + 825, + 484 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "F Reasoning Complexity 20", + "text_level": 1, + "bbox": [ + 173, + 503, + 825, + 518 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "G Comparison with Other Benchmarks 20", + "text_level": 1, + "bbox": [ + 173, + 537, + 825, + 553 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "H Additional Analysis 21", + "text_level": 1, + "bbox": [ + 173, + 571, + 825, + 587 + ], 
+ "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "H.1 Proprietary V.S. Open Models 21", + "H.2 Reasoning Category and Difficulty Levels 21", + "H.3 Option Types and Difficulty Levels 24", + "H.4 Case Study of Reasoning 25", + "H.5 Impact of CoT 25" + ], + "bbox": [ + 197, + 593, + 825, + 694 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "I Case Study 27", + "text_level": 1, + "bbox": [ + 173, + 712, + 825, + 728 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A VISUALPUZZLES Statistics", + "text_level": 1, + "bbox": [ + 171, + 101, + 449, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.1 Breakdown of Statistics of VISUALPUZZLES", + "text_level": 1, + "bbox": [ + 171, + 133, + 545, + 148 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/97f4b7976903cd9ad74954a9240e4fb1d04f429d28e0ec5595ed2893a243218d.jpg", + "table_caption": [ + "Table 6 shows a breakdown of statistics of VISUALPUZZLES questions." + ], + "table_footnote": [], + "table_body": "
Reasoning Category | Image: Easy | Image: Medium | Image: Hard | Text: Easy | Text: Medium | Text: Hard | Total
Algorithmic | 21 | 8 | 0 | 124 | 100 | 9 | 262
Analogical | 120 | 81 | 10 | 0 | 0 | 0 | 211
Deductive | 29 | 24 | 2 | 45 | 79 | 21 | 200
Inductive | 7 | 70 | 127 | 3 | 2 | 0 | 209
Spatial | 123 | 41 | 6 | 61 | 52 | 3 | 286
Total | 300 | 224 | 145 | 233 | 233 | 33 | 1168
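As a quick sanity check on the counts above, each category row should sum to its Total and the category totals should sum to the 1,168 questions reported for VISUALPUZZLES. A minimal illustrative sketch (the dictionary simply restates the table; it is not code from the paper):
```python
# Per-category question counts restated from Table 6:
# (image-option Easy/Medium/Hard, text-option Easy/Medium/Hard, row total)
counts = {
    "Algorithmic": ([21, 8, 0], [124, 100, 9], 262),
    "Analogical":  ([120, 81, 10], [0, 0, 0], 211),
    "Deductive":   ([29, 24, 2], [45, 79, 21], 200),
    "Inductive":   ([7, 70, 127], [3, 2, 0], 209),
    "Spatial":     ([123, 41, 6], [61, 52, 3], 286),
}

for name, (image_opts, text_opts, total) in counts.items():
    assert sum(image_opts) + sum(text_opts) == total, name  # each row matches its Total

assert sum(total for _, _, total in counts.values()) == 1168  # overall benchmark size
```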
", + "bbox": [ + 192, + 186, + 803, + 327 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.2 Data Sources", + "text_level": 1, + "bbox": [ + 171, + 383, + 316, + 398 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Chinese Civil Service Examination (中国国家公务员考试) 5 (224 puzzles): we manually translated questions from this exam to English from Chinese.", + "Textbooks (210 puzzles): we carefully collected and re-purposed questions from online resources and textbooks.", + "- Smart-101 (Cherian et al., 2022a) (247 puzzles): we carefully selected images from this benchmark and synthesized new questions.", + "- MATH-Vision (Wang et al., 2024a) (293 puzzles): we carefully selected and repurposed questions from this benchmark.", + "VASR (Bitton et al., 2023) (194 puzzles): we carefully selected questions from this benchmark." + ], + "bbox": [ + 212, + 404, + 825, + 561 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B Model Evaluation Setup", + "text_level": 1, + "bbox": [ + 171, + 583, + 426, + 602 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/4cfbe405921837246d7d60cac33ea7bec0b336bb12e4e84a9fe4589d15475253.jpg", + "table_caption": [ + "Table 6: Number of questions in each reasoning category, option types, and difficulty levels." + ], + "table_footnote": [], + "table_body": "
Model Evaluation Prompt with Chain-of-Thought
Solve the multiple-choice question and then answer with the option letter from the given choices. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of options. Think step by step before answering.
Model Evaluation Prompt w/n Chain-of-Thought
Answer the question with the option's letter from the given choices directly.
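For illustration, a model could be queried with the Chain-of-Thought prompt above and its final answer letter recovered from the required 'Answer: $LETTER' line roughly as sketched below; `query_model` is a hypothetical stand-in for the actual API call, and the regex parsing is an assumption rather than the paper's released evaluation code.
```python
import re

COT_PROMPT = (
    "Solve the multiple-choice question and then answer with the option letter "
    "from the given choices. The last line of your response should be of the "
    "following format: 'Answer: $LETTER' (without quotes) where LETTER is one of "
    "options. Think step by step before answering."
)

def extract_answer(response: str) -> str | None:
    """Return the last option letter found on an 'Answer: X' line, if any."""
    matches = re.findall(r"Answer:\s*\$?([A-D])", response)
    return matches[-1] if matches else None

# Hypothetical usage (query_model is assumed, not part of the paper's code):
# response = query_model(image=puzzle_image, text=f"{COT_PROMPT}\n{question}")
# prediction = extract_answer(response)
```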
", + "bbox": [ + 173, + 618, + 820, + 733 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C Human Annotation Setup", + "text_level": 1, + "bbox": [ + 171, + 753, + 442, + 773 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 Difficulty Labeling", + "text_level": 1, + "bbox": [ + 171, + 786, + 359, + 803 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Each question was also carefully assigned a difficulty label from easy, medium, or hard, based on the cognitive load required for reasoning.", + "bbox": [ + 169, + 811, + 826, + 843 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Easy Level questions could be solved by the annotator in less than one minute.", + "- Medium Level questions could be solved by the annotator in one to three minutes." + ], + "bbox": [ + 212, + 853, + 825, + 888 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "5https://en.wikipedia.org/wiki/Civil服务体系_of_the_People%27s_Republic_of_China#Examinations.", + "bbox": [ + 171, + 896, + 777, + 922 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "- Hard Level questions require the annotator more than five minutes to solve or quit solving.", + "bbox": [ + 212, + 103, + 823, + 133 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Annotation Guideline for Puzzle Difficulty", + "text_level": 1, + "bbox": [ + 178, + 172, + 490, + 188 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Try to solve the puzzle first. You need to measure the time you attempted to solve each puzzle. Then, select from Easy, Medium, or Hard based on the time required.", + "bbox": [ + 176, + 194, + 818, + 220 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Easy Level: You can solve or answer the question within 1 minute. This level of puzzles should require minimal reasoning.", + "- Medium Level: You can solve or answer the question within 1-3 minutes. This level of puzzles should demand moderate reasoning.", + "- Hard Level: You can / cannot solve this question with more than 5 minutes. This level of puzzles should involve significant / multi-step reasoning." + ], + "bbox": [ + 176, + 220, + 816, + 296 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 Reasoning Category Labeling", + "text_level": 1, + "bbox": [ + 171, + 332, + 437, + 349 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Annotation Guideline for Puzzle Reasoning Category", + "text_level": 1, + "bbox": [ + 178, + 371, + 568, + 387 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Assign the category that best describes the primary type of reasoning or logic required for each puzzle:", + "- Algorithmic Reasoning: Involves following or devising a step-by-step procedure or rule-based process.", + "- Analogical Reasoning: Requires identifying relationships by comparison between pairs of entities.", + "- Deductive Reasoning: Involves deriving specific conclusions from general or given premises.", + "- Inductive Reasoning: Focuses on generalizing a rule or pattern from specific instances.", + "- Spatial Reasoning: Involves visualizing and manipulating shapes, distances, or orientations." 
+ ], + "bbox": [ + 176, + 393, + 816, + 494 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D Full Results", + "text_level": 1, + "bbox": [ + 171, + 535, + 318, + 550 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.1 Full Results w/ CoT", + "text_level": 1, + "bbox": [ + 171, + 575, + 366, + 590 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.2 Full Results w/n CoT", + "text_level": 1, + "bbox": [ + 171, + 609, + 375, + 623 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E Knowledge Checklist", + "text_level": 1, + "bbox": [ + 171, + 643, + 401, + 662 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E.1 Knowledge Checklist Generation", + "text_level": 1, + "bbox": [ + 171, + 685, + 464, + 700 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Prompt to Generate Knowledge Checklist Questions", + "text_level": 1, + "bbox": [ + 178, + 724, + 558, + 739 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You are an exam writer. You are now writing a knowledge test. You are given a question (Question) regarding an image and its standard solution (Solution), your task is to write free response questions that test on individual knowledge required in answering the question correctly.", + "bbox": [ + 176, + 744, + 816, + 786 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You should follow these steps to complete the task:", + "bbox": [ + 178, + 795, + 511, + 809 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. explicitly analyze the given image, Question, and Solution", + "2. explicitly list out the individual knowledge concepts required to reach Solution.", + "3. write free response questions to test on the definition of each concept listed. Your generated questions should not include details of the given Question. Note that you need to provide answer keys to these questions too.", + "4. format the free response questions in json format." + ], + "bbox": [ + 179, + 810, + 816, + 885 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Question: question", + "bbox": [ + 178, + 897, + 305, + 910 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Solution: answer", + "bbox": [ + 179, + 910, + 292, + 921 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 344, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/ed5578a37fe0c76ed27e20a91c7422833b5c24ad0a429cef3aaf0b4bb114d8e2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Algorithmic | Analogical | Deductive | Inductive | Spatial | Overall
Random Choice | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 | 25.0
Human (95th Percentile) | 100.0 | 100.0 | 100.0 | 81.6 | 100.0 | 89.3
Human (50th Percentile) | 88.0 | 66.0 | 80.0 | 50.0 | 90.0 | 75.0
Human (5th Percentile) | 68.1 | 25.0 | 37.0 | 0.0 | 59.1 | 57.5
Proprietary Models
o4-mini | 65.3 | 68.7 | 75.5 | 33.0 | 45.5 | 57.0
o3 | 64.5 | 68.3 | 69.5 | 27.3 | 42.7 | 54.0
o1 | 63.7 | 68.3 | 67.5 | 29.2 | 34.3 | 51.8
GPT-4o | 49.2 | 58.3 | 49.0 | 27.3 | 26.2 | 41.3
Gemini-2.5-pro | 60.0 | 64.0 | 60.0 | 29.7 | 36.4 | 49.5
Gemini-2.0-flash | 55.3 | 58.8 | 57.0 | 24.4 | 31.8 | 45.0
Gemini-2.0-flash-thinking | 46.6 | 70.1 | 49.0 | 24.9 | 25.5 | 42.2
Gemini-1.5-Pro | 53.4 | 57.4 | 58.5 | 26.3 | 32.5 | 45.0
Claude-3.7-Sonnet | 64.5 | 48.3 | 65.0 | 26.8 | 37.4 | 48.3
Claude-3.7-Sonnet-thinking | 67.2 | 44.1 | 61.5 | 31.1 | 37.1 | 48.2
Claude-3.5-Sonnet | 53.4 | 47.9 | 51.5 | 25.4 | 34.3 | 42.4
Open Models
LLaVA-1.5-7B | 23.3 | 21.8 | 36.0 | 20.6 | 19.2 | 23.7
LLaVA-1.5-13B | 24.8 | 21.8 | 23.0 | 25.4 | 25.5 | 24.2
LLaVA-1.6-7B | 27.5 | 23.7 | 30.0 | 22.5 | 21.3 | 24.8
LLaVA-1.6-13B | 25.2 | 25.6 | 27.0 | 27.3 | 23.4 | 25.5
LLaVA-1.6-34B | 29.4 | 28.0 | 43.0 | 24.9 | 25.5 | 29.7
LLaVA-OV-0.5B | 21.0 | 26.1 | 30.5 | 22.5 | 25.2 | 24.8
LLaVA-OV-7B | 27.9 | 26.1 | 36.5 | 23.4 | 25.5 | 27.7
LLaVA-OV-72B | 34.7 | 26.5 | 37.0 | 27.3 | 28.7 | 30.8
Llama-3.2-11B-Vision-Instruct | 31.0 | 30.8 | 39.0 | 21.1 | 26.2 | 29.4
Llama-3.2-90B-Vision-Instruct | 45.0 | 23.2 | 43.0 | 26.3 | 31.5 | 34.1
Qwen-VL | 21.4 | 31.3 | 25.0 | 26.3 | 24.1 | 25.3
Qwen2-VL-72B | 41.6 | 28.4 | 39.5 | 22.5 | 29.0 | 32.4
QvQ-72B-Preview | 43.1 | 45.5 | 48.0 | 27.3 | 27.6 | 37.8
Qwen2-VL-2B-Instruct | 26.0 | 26.1 | 24.5 | 27.8 | 25.5 | 26.0
Qwen2-VL-7B-Instruct | 36.3 | 21.8 | 38.5 | 20.6 | 22.7 | 27.9
Qwen2-VL-72B-Instruct | 39.9 | 33.5 | 45.2 | 23.5 | 32.4 | 34.9
Qwen2.5-VL-3B-Instruct | 35.1 | 27.5 | 44.5 | 25.8 | 24.8 | 31.2
Qwen2.5-VL-7B-Instruct | 40.5 | 26.6 | 39.0 | 24.0 | 29.7 | 32.1
Qwen2.5-VL-72B-Instruct | 53.4 | 46.9 | 58.0 | 25.8 | 29.5 | 42.3
Cambrian-8B | 31.3 | 24.2 | 36.0 | 24.0 | 29.0 | 28.9
Cambrian-13B | 24.8 | 25.6 | 39.5 | 24.4 | 21.0 | 26.5
Pangea-7B | 30.5 | 28.9 | 35.0 | 24.4 | 25.2 | 28.6
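The Overall column is consistent with a question-count-weighted average of the five per-category accuracies, using the category sizes from Table 6. A small illustrative check for the o4-mini row (numbers restated from the tables; not the paper's code):
```python
# Category sizes from Table 6 and o4-mini's per-category accuracy (%) from Table 7.
sizes  = {"Algorithmic": 262, "Analogical": 211, "Deductive": 200, "Inductive": 209, "Spatial": 286}
o4mini = {"Algorithmic": 65.3, "Analogical": 68.7, "Deductive": 75.5, "Inductive": 33.0, "Spatial": 45.5}

overall = sum(o4mini[c] * sizes[c] for c in sizes) / sum(sizes.values())
print(round(overall, 1))  # 57.0, matching the Overall column above
```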
", + "bbox": [ + 174, + 226, + 823, + 753 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 7: Performance (%) of various models with Chain of Thoughts (CoT) on VISUALPUZZLES.", + "bbox": [ + 171, + 763, + 826, + 792 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 173, + 32, + 344, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/64cd8ec62f766c056e33cbfafa74cd1d10500a34f2b993be19215152727c7859.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Algorithmic | Analogical | Deductive | Inductive | Spatial | Overall
Random Choice | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 | 25.0
Human (95th Percentile) | 100.0 | 100.0 | 100.0 | 81.6 | 100.0 | 89.3
Human (50th Percentile) | 88.0 | 66.0 | 80.0 | 50.0 | 90.0 | 75.0
Human (5th Percentile) | 68.1 | 25.0 | 37.0 | 0.0 | 59.1 | 57.5
Proprietary Models
GPT-4o | 40.8 | 34.1 | 40.5 | 24.9 | 29.7 | 34.0
Gemini-2.0-flash | 57.6 | 41.7 | 58.0 | 23.0 | 35.7 | 43.2
Gemini-1.5-Pro | 51.2 | 46.5 | 54.0 | 24.9 | 29.4 | 40.8
Open Models
LLaVA-1.5-7B | 24.4 | 24.7 | 34.5 | 26.8 | 25.5 | 26.9
LLaVA-1.5-13B | 24.4 | 26.1 | 33.5 | 26.3 | 28.3 | 27.6
LLaVA-1.6-7B | 27.5 | 25.1 | 32.5 | 24.9 | 27.3 | 27.4
LLaVA-1.6-13B | 21.4 | 24.7 | 29.5 | 28.2 | 23.1 | 25.0
LLaVA-1.6-34B | 31.3 | 27.3 | 43.0 | 24.4 | 27.6 | 29.8
LLaVA-OV-0.5B | 24.4 | 25.6 | 37.5 | 24.9 | 25.5 | 27.2
LLaVA-OV-7B | 27.5 | 28.0 | 40.5 | 24.4 | 28.0 | 29.4
LLaVA-OV-72B | 31.7 | 23.6 | 45.0 | 21.3 | 24.6 | 28.8
Llama-3.2-11B-Vision-Instruct | 27.5 | 24.2 | 31.0 | 26.3 | 27.6 | 27.3
Llama-3.2-90B-Vision-Instruct | 38.2 | 22.3 | 44.5 | 25.8 | 33.6 | 33.1
Qwen-VL | 23.7 | 26.5 | 29.5 | 27.8 | 26.6 | 26.6
Qwen2-VL-72B | 38.9 | 28.4 | 43.0 | 20.6 | 29.0 | 32.0
QvQ-72B-Preview | 44.8 | 43.6 | 44.0 | 26.8 | 30.8 | 37.8
Qwen2-VL-2B-Instruct | 31.7 | 29.4 | 40.5 | 23.9 | 31.5 | 31.3
Qwen2-VL-7B-Instruct | 33.6 | 24.2 | 46.0 | 22.5 | 26.2 | 30.2
Qwen2-VL-72B-Instruct | 40.5 | 30.3 | 46.0 | 25.4 | 29.4 | 34.2
Qwen2.5-VL-3B-Instruct | 36.3 | 26.1 | 47.0 | 25.8 | 22.4 | 31.0
Qwen2.5-VL-7B-Instruct | 38.2 | 23.7 | 51.5 | 24.9 | 31.1 | 33.7
Qwen2.5-VL-72B-Instruct | 43.1 | 40.3 | 51.5 | 25.4 | 33.7 | 38.6
Cambrian-8B | 25.2 | 20.4 | 35.0 | 23.0 | 20.6 | 24.5
Cambrian-13B | 23.3 | 28.0 | 36.5 | 24.9 | 26.2 | 27.4
Pangea-7B | 32.4 | 23.7 | 38.5 | 28.7 | 32.5 | 31.3
", + "bbox": [ + 174, + 276, + 823, + 705 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 8: Performance (%) of various models with Multiple Choice Direct prompting on VISUALPUZZLES.", + "bbox": [ + 171, + 715, + 823, + 743 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 173, + 32, + 344, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.2 Example Knowledge Checklist Question", + "text_level": 1, + "bbox": [ + 171, + 103, + 516, + 119 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Example Knowledge Checklist Question (MMMU)", + "text_level": 1, + "bbox": [ + 179, + 136, + 547, + 152 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Question: Explain the Arbitrage Pricing Theory (APT) model and its purpose in finance.", + "- Answer: The Arbitrage Pricing Theory (APT) model is a financial theory that estimates the expected return on an asset based on the asset's sensitivity to various macroeconomic factors. It is used to determine the fair price of an asset by considering multiple factors that could affect its return, as opposed to relying on a single market index as in the Capital Asset Pricing Model (CAPM)." + ], + "bbox": [ + 174, + 157, + 816, + 223 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Example Knowledge Checklist Question (VISUALPUZZLES)", + "text_level": 1, + "bbox": [ + 179, + 238, + 609, + 253 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Question: What is the definition of distance in a geometric context?", + "- Answer: Distance in a geometric context refers to the measurement of space between two points." + ], + "bbox": [ + 174, + 260, + 813, + 287 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E.3 Knowledge Checklist Human Annotation", + "text_level": 1, + "bbox": [ + 171, + 311, + 524, + 325 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We asked two human annotators to manually verify and correct the knowledge checklist questions and gave them the following instructions. The inter-annotator agreement rate is $87.8\\%$ .", + "bbox": [ + 169, + 338, + 823, + 380 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Human Annotation Instructions", + "text_level": 1, + "bbox": [ + 179, + 393, + 413, + 407 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "You are given a json file, where each item contains the following elements:", + "bbox": [ + 176, + 411, + 661, + 425 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Question: a multiple-choice question.", + "- Answer: the answer to the question with an optional explanation.", + "- Knowledge Concept Checklist: a list of question-answer pairs, where each question in the list is intended to represent a distinct knowledge concept necessary for solving the Question." + ], + "bbox": [ + 176, + 425, + 816, + 474 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "You task is to annotate the knowledge concept checklists generated by a model. You should carefully evaluate each question-answer pair based on the following criteria:", + "bbox": [ + 176, + 486, + 816, + 513 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. 
Necessity: Is the question genuinely necessary for solving the problem? If not, then remove the question.", + "2. Repetition: Check if any questions are repetitive or duplicate existing questions within the list. If the question is repetitive or duplicate, then remove the question.", + "3. Completeness: Ensure no critical knowledge concepts required to solve the problem are missing, and identify if any additional important questions should have been included.", + "4. Correctness: Verify whether the provided answers are accurate. Revise the checklist in case of incorrect checklist QA pairs.", + "5. Knowledge v.s. Skills: Ensure each question explicitly evaluates a knowledge concept rather than testing skills or problem-solving techniques. Remove any questions that primarily evaluate skills instead of knowledge." + ], + "bbox": [ + 176, + 513, + 818, + 651 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F Reasoning Complexity", + "text_level": 1, + "bbox": [ + 171, + 679, + 410, + 696 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Instruction Prompt to Solve Questions in Detailed Steps", + "text_level": 1, + "bbox": [ + 179, + 719, + 584, + 734 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": " < \\text{Imoge}>$", + "bbox": [ + 176, + 739, + 344, + 755 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Solve this question with First Order Logic. Write out each thinking step explicitly, do not skip steps. In your response, begin each step with ____STEP_START__.", + "bbox": [ + 176, + 755, + 816, + 779 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "step $<$ step_num>", + "bbox": [ + 178, + 780, + 303, + 792 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "G Comparison with Other Benchmarks", + "text_level": 1, + "bbox": [ + 171, + 820, + 540, + 837 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 9 provides a comparative analysis between VISUALPUZZLES and several widely-used benchmarks for multimodal reasoning, visualizing the knowledge requirement and reasoning complexity of each benchmark. VISUALPUZZLES has high reasoning complexity and low knowledge requirement, with an aim to disentangle multimodal reasoning from domain-specific knowledge to evaluate general reasoning abilities in non-expert settings.", + "bbox": [ + 169, + 853, + 826, + 926 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 344, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/3baa80789cf99e4f68fa4353ef1538bb9d8872ab955ae28a990f5460c196ae12.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | Size | Reasoning Load | Knowledge Requirement | % Easy Words | Question Type | Answer Type
LogiQA | 0.7K | Heavy | Light | 52.0 | Text | Text
GSM8K | 8.5K | Heavy | Heavy | 54.0 | Text | Text
WikiDiverse | 0.8K | Light | Heavy | 35.8 | Image+Text | Text
MathVista | 6.1K | Heavy | Heavy | 51.9 | Image+Text | Text
MMMU | 11.5K | Heavy | Heavy | 46.4 | Image+Text | Text
MATH-Vision | 3.0K | Heavy | Heavy | 53.8 | Image+Text | Image+Text
MathVerse | 2.6K | Heavy | Heavy | 38.2 | Image+Text | Text
LogicBench | 1.5K | Heavy | Light | 53.6 | Text | Text
LogicVista | 0.4K | Heavy | Heavy | 41.2 | Image+Text | Image
NaturalBench | 10K | Light | Light | 52.5 | Image+Text | Text
VISUALPUZZLES | 1.2K | Heavy | Light | 54.1 | Image+Text | Image+Text
", + "bbox": [ + 176, + 99, + 823, + 256 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 9: Comparison of other existing benchmarks with VISUALPUZZLES", + "bbox": [ + 232, + 263, + 764, + 280 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/5a9945fb2950cbab9e5972ec413e60b62a9b0274be1ab32e1b5d8735f9bb79f7.jpg", + "image_caption": [ + "Figure 9: Comparison between VISUALPUZZLES and several widely-used benchmarks." + ], + "image_footnote": [], + "bbox": [ + 334, + 300, + 661, + 445 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 10 compare the performance of various model families across MathVista, MMMU, and VISUALPUZZLES. Both MathVista and MMMU are benchmarks that have a heavy emphasis on both knowledge and reasoning, whereas VISUALPUZZLES assess models on domain-disentangled multimodal reasoning alone. We found that success on knowledge-intensive multimodal reasoning benchmarks as MathVista and MMMU does not always carry over to VISUALPUZZLES that emphasize reasoning rather than extensive pre-trained knowledge.", + "bbox": [ + 169, + 501, + 826, + 587 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "H Additional Analysis", + "text_level": 1, + "bbox": [ + 171, + 612, + 395, + 630 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "H.1 Proprietary V.S. Open Models", + "text_level": 1, + "bbox": [ + 171, + 647, + 442, + 664 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "From Table 2, proprietary models (e.g., o4-mini and Claude-3.7-Sonnet) consistently achieve higher overall accuracy than most open-source models on VISUALPUZZLES. However, some open models also show competitive or even higher performance in both the overall accuracy and specific reasoning categories. For instance, Qwen2.5-VL-72B-Instruct demonstrates higher performance than GPT-4o on algorithmic reasoning, deductive reasoning, spatial reasoning, and overall accuracy. This indicates that while proprietary models currently have leading performance, open models are also rapidly improving on multimodal reasoning capabilities.", + "bbox": [ + 169, + 676, + 826, + 789 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "H.2 Reasoning Category and Difficulty Levels", + "text_level": 1, + "bbox": [ + 171, + 811, + 529, + 828 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figure 11 and Figure 10 present complementary views of human accuracy against three representative models: o1 (one of the best-performing proprietary models), Qwen2.5-VL72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model). Specifically, Figure 10 compares performance across difficulty levels for each reasoning category, while Figure 11 compares performance across categories within each difficulty level.", + "bbox": [ + 169, + 839, + 826, + 926 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/ce6eca9bc80c76f6a1977c79291f1241734efb0200616f04e4ea89031045eeaf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | MathVista | MMMU | VISUALPUZZLES
Human | 60.3 | 88.6 | 80.1
o1 | 73.9 | 78.2 | 51.8
GPT-4o | 63.8 | 69.1 | 41.1
Gemini-2.0-Flash | - | 71.7 | 45.0
Gemini-1.5-Pro | 63.9 | 62.2 | 45.4
Claude-3.5-Sonnet | 67.7 | 68.3 | 42.4
Claude-3.7-Sonnet | - | 71.8 | 48.3
Claude-3.7-Sonnet (Thinking) | - | 75.0 | 48.3
LLaVA-1.5-7B | - | 36.2 | 26.9
LLaVA-1.5-13B | 27.6 | 36.4 | 27.6
LLaVA-NeXT-7B | 35.8 | 34.6 | 27.4
LLaVA-NeXT-13B | 36.2 | 35.3 | 25.3
LLaVA-NeXT-34B | 46.5 | 51.1 | 29.8
LLaVA-OV-0.5B | 34.8 | 31.4 | 27.2
LLaVA-OV-7B | 63.2 | 48.8 | 29.4
LLaVA-OV-72B | 67.5 | 56.8 | 31.8
Llama-3.2-11B-Vision-Instruct | 51.5 | 50.7 | 29.4
Llama-3.2-90B-Vision-Instruct | 57.3 | 60.3 | 34.3
Qwen2-VL-72B | 70.5 | 64.5 | 32.1
QvQ-72B-Preview | 71.4 | 70.3 | 37.9
Qwen2-VL-2B-Instruct | 43.0 | 41.1 | 31.3
Qwen2-VL-7B-Instruct | 58.2 | 54.1 | 30.2
Qwen2-VL-72B-Instruct | 70.5 | 64.5 | 34.9
Qwen2.5-VL-3B-Instruct | 62.3 | 53.1 | 31.2
Qwen2.5-VL-7B-Instruct | 68.2 | 58.6 | 33.7
Qwen2.5-VL-72B-Instruct | 74.8 | 70.2 | 42.3
Cambrian-8B | 49.0 | 42.7 | 28.5
Cambrian-13B | 48.0 | 40.0 | 27.4
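To make the gap between knowledge-heavy and knowledge-light scores concrete, the sketch below restates a few (MMMU, VISUALPUZZLES) pairs from Table 10 and prints the drop for each model; it is illustrative only and not part of the paper's analysis code.
```python
# A few (MMMU, VISUALPUZZLES) accuracy pairs (%) restated from Table 10.
scores = {
    "o1": (78.2, 51.8),
    "GPT-4o": (69.1, 41.1),
    "Qwen2.5-VL-72B-Instruct": (70.2, 42.3),
    "QvQ-72B-Preview": (70.3, 37.9),
    "LLaVA-OV-72B": (56.8, 31.8),
}

for model, (mmmu, vp) in scores.items():
    print(f"{model}: MMMU {mmmu:.1f} -> VISUALPUZZLES {vp:.1f} (drop {mmmu - vp:.1f})")
```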
", + "bbox": [ + 218, + 101, + 776, + 518 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 10: Comparison of other MathVista and MMMU with VISUALPUZZLES on human and SOTA models", + "bbox": [ + 173, + 527, + 821, + 555 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Humans consistently outperform all models across categories and difficulty levels, often by large margins. Notably, human performance remains high and relatively stable in the algorithmic, deductive, and spatial categories, even on hard questions. While accuracy does decline in analogical and inductive reasoning as difficulty increases, humans still maintain a clear advantage over models.", + "bbox": [ + 173, + 603, + 823, + 674 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In contrast, model performance declines sharply as difficulty increases, especially for open-source models. Accuracy of Llama-3.2-90B-Vision-Instruct on hard analogical tasks drops to just $10\\%$ . Even one of the strongest proprietary models, o1, while more robust, still lags significantly behind humans, particularly on analogical, inductive, and spatial tasks. On easy tasks, some models perform competitively in certain categories, but this advantage largely disappears on medium and hard questions.", + "bbox": [ + 173, + 680, + 823, + 765 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Interestingly, these models maintain a generally stable performance on algorithmic and deductive reasoning. For o1 and Qwen2.5-VL-72B-Instruct, their performances on algorithmic reasoning even go up for more difficult tasks, whereas human performance degraded as the difficulty level increases. However, all models, including o1, perform the worse at analogical, inductive and spatial reasoning in general, especially as the difficulty level increases. This suggests that models are relatively better at tasks requiring structured, rule-based algorithmic processing, while their performance degrades more steeply in tasks requiring relational abstraction (analogical), pattern induction (inductive), and visual understanding (spatial), particularly as the difficulty level increases. 
In summary, these results indicate that while some models exhibit promising performance on structured and easier reasoning tasks, multimodal models still struggle with abstract and complex reasoning, particularly", + "bbox": [ + 173, + 770, + 823, + 924 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 173, + 32, + 344, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e07d22a4f5fa74094ba8126af08a759927de508d0089c6af0dc1d22ad43d3d84.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 99, + 823, + 246 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/2eb08ceca733cd8c85e3edca9cf46721d82bbaf628b9e06926a75a16c7a708b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 246, + 823, + 390 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/c5e2c4db744c3657fe1cb4531ced0aa2d78982d0bb5fcdc58270986c6c22c141.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 391, + 823, + 534 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/70f13531e69835c70feace1f0c5d4cb9f407929b98d21c7bf77f8b7957c853c2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 535, + 823, + 679 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/dc9f11d9c6ca0344d00277395624f99e2b3066e6ca555339dae6e6a396995963.jpg", + "image_caption": [ + "Figure 10: Comparison of accuracy across different reasoning categories for human participants, one of the best performing proprietary models o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama-3.2-90B-Vision-Instruct, measured on difficulty levels." + ], + "image_footnote": [], + "bbox": [ + 173, + 680, + 823, + 825 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/26d9b479b586179672f0a391c3433b4482c8e8deeca5515d3b7db271c2635940.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 102, + 823, + 262 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/2df83bc62266ef79c8c7d72021db90c5bdc93adf7f3d635def5ee2e1de1692b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 263, + 823, + 425 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/932cbaf51c205a9c980fb43076c9fd7e28519b492074fde5af65adb7c67653ef.jpg", + "image_caption": [ + "Figure 11: Comparison of accuracy across different difficulty levels for human participants, one of the best performing proprietary models o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama3.2-90B-Vision-Instruct, measured across reasoning categories." + ], + "image_footnote": [], + "bbox": [ + 173, + 426, + 823, + 589 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "when difficulty increases. 
Bridging the gap between model and human reasoning remains a critical challenge.", + "bbox": [ + 169, + 683, + 823, + 713 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "H.3 Option Types and Difficulty Levels", + "text_level": 1, + "bbox": [ + 171, + 729, + 480, + 744 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 12 compares human accuracy against three representative models, o1 (one of the best-performing proprietary models), Qwen2.5-VL-72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model), across different difficulty levels, separately for textual and visual answer options.", + "bbox": [ + 169, + 755, + 826, + 813 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Across all participants and models, we observe a consistent pattern: text-based options result in higher accuracy than image-based options, with the performance gap widening as task difficulty increases. This trend holds even for human participants, whose accuracy drops from $92\\%$ to $40\\%$ on visual options when moving from easy to hard tasks, compared to a much smaller drop on text-based ones ( $93\\%$ to $73\\%$ ).", + "bbox": [ + 169, + 818, + 823, + 888 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "For models, the gap is even more pronounced. For instance, Qwen2.5-VL-72B-Instruct achieves $58\\%$ accuracy on hard questions with text options, but only $20\\%$ when image", + "bbox": [ + 169, + 895, + 823, + 926 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/3737d6ab696a3c310dab5c98fb390eedc23b20fda6553c6b5310acdd5f1eabeb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 102, + 496, + 229 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/60a6175bfd05a6f7af089e7517aaca78aec0f7e4c49b2736be538da2e6e2dcfa.jpg", + "image_caption": [ + "Llama-3.2-90B-Vision-Instruct" + ], + "image_footnote": [], + "bbox": [ + 521, + 102, + 821, + 229 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/89b0feaa5116c74ddcd31055f2b342bfdeeb7a6e14a35f8811b249750b39baac.jpg", + "image_caption": [ + "Owen2.5-VL-72B-Instruct", + "Figure 12: Comparison of accuracy across different difficulty levels for human participants, one of the best performing proprietary model o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama3.2-90B-Vision-Instruct, measured on textual v.s. visual option types." + ], + "image_footnote": [], + "bbox": [ + 173, + 239, + 493, + 359 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/ece4b18303218047608cad7c13202f459437b64ddffa0465d71243302f485618.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 241, + 821, + 359 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "options are used. o1 and Llama-3.2-90B-Vision-Instruct exhibit similar drops, suggesting a broad weakness in multi-image reasoning and visual option discrimination. These findings suggest that image-based answer options introduce significant additional complexity, requiring models not just to understand the question but to reason over multiple visual cues. 
This capability is essential for real-world tasks such as product selection, recommendation, and visual planning, where their decision-making process often depends on comparing visual content.", + "bbox": [ + 169, + 452, + 826, + 551 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "However, most pretraining datasets and benchmarks have traditionally emphasized textual QA formats, with far fewer examples involving visual options or structured visual comparisons. As a result, models may lack the inductive bias or learned attention mechanisms to handle visual alternatives effectively. These results highlight an important direction for future work: expanding and diversifying training corpora to include multi-choice visual reasoning tasks, and developing architectures that are explicitly designed to process and compare visual candidates, especially under challenging conditions.", + "bbox": [ + 169, + 556, + 826, + 657 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "H.4 Case Study of Reasoning", + "text_level": 1, + "bbox": [ + 171, + 672, + 406, + 690 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 13 shows a case study demonstrating the similarity in structure and reasoning strategy between Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking. Average textual similarity between model responses of these two models on VISUALPUZZLES is 0.9.", + "bbox": [ + 169, + 698, + 826, + 742 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "H.5 Impact of CoT", + "text_level": 1, + "bbox": [ + 171, + 758, + 326, + 773 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 11 compares model performance under two prompting strategies: direct multiple-choice prompt vs. Chain-of-Thought (CoT) prompt. We observe that proprietary models and larger open models $(\\geq 72\\mathrm{B})$ benefit from CoT, while others show little to no improvement or even a decline in performance with CoT. For instance, both GPT-4o and Qwen2.5-VL-72B-Instruct show more than $20\\%$ in", + "bbox": [ + 169, + 784, + 464, + 924 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/999904e34e7035b4b23684e5a2a42c123a5cad6b8ceb3076290e805a48da6d9f.jpg", + "table_caption": [], + "table_footnote": [ + "Table 11: Comparison of models with Direct Multiple Choice and CoT prompting." + ], + "table_body": "
Model | Direct | CoT
GPT-4o | 34.0 | 41.6
Gemini-1.5-Pro | 41.0 | 45.1
Claude-3.5-Sonnet | 40.0 | 42.5
Qwen2-VL-2B-Instruct | 31.3 | 26.1
Qwen2.5-VL-7B-Instruct | 33.7 | 32.0
Cambrian-13B | 27.4 | 26.5
LLaVA-NeXT-34B | 29.8 | 29.6
Qwen2.5-VL-72B-Instruct | 38.6 | 42.3
Llama-3.2-90B-Vision-Instruct | 33.3 | 33.9
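For reference, the gain (or loss) from Direct to CoT prompting can be read off directly from the rows above; a purely illustrative sketch that restates them and sorts models by the change:
```python
# (Direct, CoT) overall accuracy (%) restated from Table 11.
results = {
    "GPT-4o": (34.0, 41.6),
    "Gemini-1.5-Pro": (41.0, 45.1),
    "Claude-3.5-Sonnet": (40.0, 42.5),
    "Qwen2-VL-2B-Instruct": (31.3, 26.1),
    "Qwen2.5-VL-7B-Instruct": (33.7, 32.0),
    "Cambrian-13B": (27.4, 26.5),
    "LLaVA-NeXT-34B": (29.8, 29.6),
    "Qwen2.5-VL-72B-Instruct": (38.6, 42.3),
    "Llama-3.2-90B-Vision-Instruct": (33.3, 33.9),
}

for model, (direct, cot) in sorted(results.items(), key=lambda kv: kv[1][1] - kv[1][0], reverse=True):
    print(f"{model}: {direct:.1f} -> {cot:.1f} ({cot - direct:+.1f})")
```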
", + "bbox": [ + 470, + 747, + 831, + 901 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/c86427df64f338c67429e5c9fd24fa20e14af7ab99cc71ca9cd42b04000c044d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Question: Given a pair of chairs arranged at a specific angle, \nselect the option from the four provided that matches the angle of \nthe chairs in the question. (Answer: C) | Claude-3.7-Sonnet (With Thinking) \nLet me analyze this problem carefully.
The question shows a “question pair” of chairs at the top of the image, and I need to find which of the four options (A, B, C, D) matches \nthe angle arrangement of those chairs.
Let me examine the question pair first:
- The question pair shows two folding chairs that appear to be facing inward toward each other at approximately a 90-degree angle (the \nchairs are at roughly right angles to each other).
- The chairs in the question pair are shown separated by a colon “:” symbol.
Claude-3.7-Sonnet (Without Thinking) \nI need to analyze the angle relationship between the pair of chairs \nin the question and compare it with the four options provided. \nLooking at the question image at the top: \n- The first chair (on the left) is shown at an angle, with its side \nprofile partially visible \n- The second chair (on the right) is shown more from the front, \nfacing more directly toward the viewer \n- The chairs are positioned at roughly a 90-degree angle to each \nother \nNow examining each option: \nOption A: Two simple chairs with backs. The left chair is shown \nfrom a side angle, while the right chair is shown more from the \nfront. Their orientation appears to be at approximately a 90-degree \nangle to each other. \nOption B: Two backless chairs/stools. The left chair has an angled \ndesign and is shown from the side, while the right chair is shown \nmore straight-on. The angle between them doesn’t appear to \nmatch the 90-degree relationship in the question. \nOption C: Two office/drafting chairs with wheels. Both chairs are \nshown from similar angles, with only slight variation in their \norientation. They don’t appear to have the same 90-degree \nrelationship as the question chairs. \nOption D: Two bar stools with round seats and circular bases. Both \nare shown from similar angles with minimal difference in orientation. \nThey don’t demonstrate the 90-degree relationship seen in the \nquestion. \nComparing all options to the question image, Option A most \nclosely matches the angular relationship between the two chairs in \nthe question. \nAnswer: A \nNow let me examine each option: \nOption A: \n- Shows two simple chairs with backrests \n- The chairs are facing each other at approximately a 90-degree angle \n- The arrangement looks similar to the question pair in terms of the angle
Option B: \n- Shows two chairs with different designs - one appears to be a more angled/tilted chair and the other is more upright \n- They don’t appear to be at the same angle relationship as the question pair
Option C: \n- Shows two office/drafting chairs with wheels and height adjustment \n- The chairs appear to be facing each other at approximately a 90-degree angle \n- The arrangement looks similar to the question pair in terms of angle
Option D: \n- Shows two bar-style chairs with round seats and central pedestals \n- The chairs appear to be directly facing each other (180-degree relationship) \n- This doesn’t match the angle relationship in the question pair
Based on the angle relationship, both Options A and C show chairs at approximately a 90-degree angle to each other, similar to the \nquestion pair. However, Option A’s chair style and arrangement seems closer to the question pair’s folding chairs.
So, looking at the angle between the chairs, which is the key aspect of this question, Options A and C both show approximately \n90-degree angles between chairs, while Option B shows chairs at different angles, and Option D shows chairs directly facing each other. \nSince the question specifically asks about the angle match, I need to determine which option best matches the exact angle of the \nquestion pair.
Looking closely again: \nThe question pair shows chairs that are facing slightly inward, at approximately 90 degrees. \nOption A shows a similar angle relationship of roughly 90 degrees. \nOption C also shows a similar angle relationship of roughly 90 degrees. \nThis is a bit challenging with the 2D representation, but I think Option A most closely matches the angle relationship in the question pair.
Answer: A
", + "bbox": [ + 173, + 102, + 823, + 435 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Figure 13: Case Study showing the similarity in structure and reasoning strategy between Claude-3.7-Sonnet-Thinking and Claude-3.7-Sonnet. Similarity between model responses of these two models on VISUALPUZZLES is 0.9.", + "bbox": [ + 169, + 439, + 823, + 481 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "creases in performance when using CoT. In contrast, several smaller models, such as Qwen2-VL-2B-Instruct and Cambrian-13B, exhibit decreased accuracy with CoT prompting. These results suggest that CoT can indeed enhance the reasoning capability of larger models whereas it may introduce unnecessary complexity or confusion for smaller models and thus decreasing performance.", + "bbox": [ + 169, + 508, + 826, + 580 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "I Case Study", + "text_level": 1, + "bbox": [ + 171, + 101, + 305, + 119 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Algorithmic (Easy)", + "text_level": 1, + "bbox": [ + 419, + 150, + 570, + 167 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/424714a384e3ef3140a976266073aef801742fb243abbc70eecfcc5dd6030446.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 151, + 616, + 165 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/6ff5da0dfd71ea7192bbc4b256b148718f303bd5f12d4f39bba8c9a93445e556.jpg", + "image_caption": [ + "Figure 14: A sample error case of Algorithmic Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 173, + 175, + 408, + 268 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question: There are seven squares in the picture. How many more triangles than squares are there in the picture?", + "bbox": [ + 419, + 191, + 805, + 218 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Options: (A) 1 (B) 2 (C) 3 (D) 4", + "bbox": [ + 421, + 219, + 607, + 232 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 421, + 238, + 537, + 252 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: I need to count the number of squares and triangles in the given picture, and then find how many more triangles there are than squares.", + "bbox": [ + 176, + 276, + 818, + 303 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "First, let me count the squares:", + "bbox": [ + 179, + 314, + 364, + 325 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "1. I can see 7 small squares arranged in a triangular pattern within the large triangle.", + "bbox": [ + 179, + 327, + 683, + 340 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Now, let me count the triangles:", + "bbox": [ + 179, + 351, + 372, + 364 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "1. There's the large outer triangle that contains everything.", + "bbox": [ + 179, + 364, + 529, + 377 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "2. 
There are small triangles formed by the division of the large triangle by the squares.", + "bbox": [ + 179, + 377, + 692, + 390 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let me count these small triangles carefully:", + "- Along the bottom row, I see 4 triangles (formed by the squares and the bottom edge)", + "- On the left side of the large triangle, I see 3 triangles (formed by the squares and the left edge)", + "- On the right side of the large triangle, I see 3 triangles (formed by the squares and the right edge)", + "- Don't forget the large outer triangle itself: 1 triangle" + ], + "bbox": [ + 179, + 402, + 766, + 465 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "So in total: $4 + 3 + 3 + 1 = 11$ triangles", + "bbox": [ + 176, + 476, + 403, + 489 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Now I can find the difference:", + "bbox": [ + 176, + 501, + 359, + 513 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Number of triangles - Number of squares $= 11 - 7 = 4$", + "bbox": [ + 179, + 513, + 496, + 527 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "So there are 4 more triangles than squares in the picture.", + "bbox": [ + 176, + 539, + 516, + 553 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Looking at the options, the answer is (D) 4.", + "bbox": [ + 176, + 564, + 437, + 577 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 179, + 577, + 277, + 589 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Looking at the image, I need to count the total number of squares and triangles, then find their difference.", + "bbox": [ + 178, + 589, + 810, + 603 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "First, I'll count the squares:", + "bbox": [ + 178, + 614, + 341, + 626 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- I can see 7 small squares arranged in a triangular pattern inside the large triangle.", + "bbox": [ + 178, + 627, + 674, + 640 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Next, I'll count the triangles:", + "bbox": [ + 178, + 651, + 351, + 664 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. There is the large outer triangle that contains everything.", + "2. Along the bottom edge of the large triangle, there are 4 small triangles.", + "3. Along the left edge of the large triangle, there are 3 small triangles.", + "4. Along the right edge of the large triangle, there are 3 small triangles." 
+ ], + "bbox": [ + 179, + 665, + 614, + 715 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Total number of triangles $= 1 + 4 + 3 + 3 = 11$ triangles", + "bbox": [ + 178, + 726, + 503, + 739 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Now I can calculate how many more triangles than squares there are:", + "bbox": [ + 178, + 751, + 594, + 763 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "11 triangles - 7 squares = 4 more triangles", + "bbox": [ + 179, + 765, + 428, + 777 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Answer: D", + "bbox": [ + 179, + 789, + 246, + 800 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "bbox": [ + 179, + 806, + 377, + 819 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Error Reason: The model gets the answer wrong because it miscounts the number of triangles, specifically double-counting the triangles formed by small triangles and squares. The response incorrectly conclude there are 11 triangles, but a careful manual inspection shows only 10 distinct triangles, making the correct difference 3, not 4.", + "bbox": [ + 178, + 819, + 818, + 869 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Algorithmic (Easy)", + "text_level": 1, + "bbox": [ + 419, + 111, + 568, + 128 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/61f9deaf7571114247539cebe087ca5c23f67840862b57d8b656849458a44f08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/7461112c409fd02eba1473e6933495500624a55d6357cb99ddfdfec1549ea9f6.jpg", + "image_caption": [ + "Figure 15: A sample correct case of Algorithmic Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 187, + 160, + 271, + 215 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/fb31c8ae8c6c1b5b7fe9668564add2294271e6b74599c26aa0c6c7eea16db2e7.jpg", + "image_caption": [ + "(B)" + ], + "image_footnote": [], + "bbox": [ + 305, + 148, + 364, + 215 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/2d3a21ad802b33bf71c70e0abd227d85a77a0230e56bf9bc88272a39454b0e56.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 383, + 148, + 470, + 215 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/b3960c57aa37463f6a6aae7ee509937f01607f72a797a14974a5afe562bad84c.jpg", + "image_caption": [ + "(D)" + ], + "image_footnote": [], + "bbox": [ + 496, + 148, + 560, + 215 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Question: In the Kangaroo constellation, all stars have a number greater than 3 and their sum is 20. Which option shows the Kangaroo constellation?", + "bbox": [ + 566, + 141, + 792, + 202 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Ground Truth: (B)", + "bbox": [ + 568, + 208, + 679, + 222 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: The question asks about the Kangaroo constellation, which has the following properties:", + "bbox": [ + 184, + 234, + 810, + 257 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. All stars have a number greater than 3", + "2. 
The sum of all numbers is 20" + ], + "bbox": [ + 186, + 257, + 380, + 276 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Let me check each option to see which one satisfies these conditions:", + "bbox": [ + 184, + 286, + 514, + 297 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (A):", + "text_level": 1, + "bbox": [ + 184, + 306, + 243, + 316 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars have values: 5, 4, 7, 6", + "bbox": [ + 186, + 318, + 315, + 327 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Are all numbers greater than 3? Yes (5, 4, 7, 6 are all $>3$ )", + "- Sum: $5 + 4 + 7 + 6 = {22}$" + ], + "bbox": [ + 186, + 328, + 459, + 345 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This doesn't match the required sum of 20.", + "bbox": [ + 186, + 347, + 390, + 357 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (B):", + "text_level": 1, + "bbox": [ + 184, + 367, + 241, + 377 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Stars have values: 5, 8, 7", + "- Are all numbers greater than 3? Yes (5, 8, 7 are all $>3$ )", + "- Sum: $5 + 8 + 7 = {20}$" + ], + "bbox": [ + 186, + 378, + 447, + 406 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This matches both conditions: all numbers are greater than 3, and they sum to 20.", + "bbox": [ + 184, + 407, + 573, + 417 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (C):", + "text_level": 1, + "bbox": [ + 184, + 426, + 241, + 436 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars have values: 5, 3, 7, 2, 8", + "bbox": [ + 186, + 438, + 325, + 446 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Are all numbers greater than 3? No (3 and 2 are not $>3$ )", + "- Sum: $5 + 3 + 7 + 2 + 8 = {25}$" + ], + "bbox": [ + 186, + 446, + 459, + 465 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This doesn't satisfy the first condition about all stars having numbers greater than 3.", + "bbox": [ + 186, + 467, + 589, + 478 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (D):", + "text_level": 1, + "bbox": [ + 184, + 487, + 241, + 497 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars have values: 5, 1, 4, 9", + "bbox": [ + 186, + 498, + 313, + 506 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Are all numbers greater than 3? No (1 is not $>3$ )", + "- Sum: $5 + 1 + 4 + 9 = {19}$", + "This doesn't satisfy either condition." 
+ ], + "bbox": [ + 186, + 507, + 423, + 537 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "So, only Option (B) satisfies both conditions of the Kangaroo constellation.", + "bbox": [ + 184, + 547, + 542, + 556 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 186, + 558, + 264, + 566 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Solving the Kangaroo Constellation Problem", + "bbox": [ + 186, + 566, + 410, + 577 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "I need to identify the constellation where all stars have numbers greater than 3 and sum to 20.", + "bbox": [ + 184, + 585, + 633, + 598 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Let me examine each option:", + "bbox": [ + 184, + 606, + 325, + 618 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (A)", + "text_level": 1, + "bbox": [ + 184, + 627, + 256, + 637 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars: 5, 4, 7, 6", + "bbox": [ + 186, + 638, + 254, + 646 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- All numbers $> 3$ ? Yes", + "- Sum: $5 + 4 + 7 + 6 = {22}$" + ], + "bbox": [ + 186, + 648, + 302, + 665 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Doesn't match the required sum of 20.", + "bbox": [ + 186, + 667, + 369, + 676 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (B)", + "text_level": 1, + "bbox": [ + 184, + 686, + 254, + 696 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars: 5, 8, 7", + "bbox": [ + 186, + 698, + 243, + 705 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- All numbers $> 3$ ? Yes", + "bbox": [ + 186, + 708, + 294, + 715 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Sum: $5 + 8 + 7 = {20}$", + "bbox": [ + 186, + 717, + 285, + 724 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Matches both requirements!", + "bbox": [ + 186, + 727, + 321, + 737 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (C)", + "text_level": 1, + "bbox": [ + 186, + 747, + 254, + 757 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars: 5, 3, 7, 2, 8", + "bbox": [ + 186, + 758, + 264, + 766 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- All numbers $> 3$ ? No (3 and 2 are $\\leq 3$ )", + "- Sum: $5 + 3 + 7 + 2 + 8 = 25$" + ], + "bbox": [ + 186, + 767, + 370, + 786 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Doesn't satisfy the first condition.", + "bbox": [ + 186, + 787, + 348, + 797 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Option (D)", + "text_level": 1, + "bbox": [ + 184, + 806, + 256, + 818 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Stars: 5, 1, 4, 9", + "bbox": [ + 186, + 819, + 254, + 827 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- All numbers $> 3$ ? 
No (1 is $\\leq 3$ )", + "bbox": [ + 186, + 828, + 334, + 837 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Sum: $5 + 1 + 4 + 9 = {19}$", + "bbox": [ + 186, + 838, + 302, + 845 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Doesn't satisfy either condition.", + "bbox": [ + 186, + 847, + 338, + 857 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Only option (B) satisfies both requirements: all stars have numbers greater than 3, and their sum is exactly 20.", + "bbox": [ + 184, + 866, + 709, + 878 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 887, + 238, + 896 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Algorithmic (Medium)", + "text_level": 1, + "bbox": [ + 405, + 111, + 583, + 128 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/00e50a1568abcd54465fc320e5e371cbde25a42d012dacc8fef4c74e1687b8cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 591, + 112, + 612, + 126 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/33212890af4e16181bfc285e43be24966ae928823fec25db8cb92706b2348fe9.jpg", + "image_caption": [ + "Figure 16: A sample error case of Algorithmic Reasoning (difficulty: Medium)." + ], + "image_footnote": [], + "bbox": [ + 186, + 157, + 545, + 185 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Question: How many left hands appear in the image?", + "bbox": [ + 568, + 142, + 790, + 167 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Options: (A) 1 (B) 8 (C) 4 (D) 5", + "bbox": [ + 571, + 167, + 751, + 180 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 570, + 186, + 681, + 200 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: ", + "text_level": 1, + "bbox": [ + 184, + 219, + 411, + 231 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Looking at the image, I can see approximately 10-12 hand icons/emoji in a row. They're all yellow (with one that appears to be skin-colored), and they show various hand gestures.", + "bbox": [ + 184, + 231, + 808, + 255 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "To determine which ones are left hands, I need to look at the thumb position. In a left hand, the thumb would typically be on the right side when the palm is facing forward.", + "bbox": [ + 184, + 263, + 808, + 287 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Let me go through each hand from left to right:", + "bbox": [ + 184, + 297, + 437, + 310 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Thumbs down gesture - appears to be a left hand with thumb on right side", + "2. Fist or closed hand - orientation is harder to determine but seems to be a left hand", + "3. Open hand or waving hand - appears to be a left hand", + "4. Hand pointing or gesturing - appears to be a left hand", + "5. Open hand with fingers spread - appears to be a left hand", + "6. Fist with extended thumb (like \"thumbs up\" but horizontal) - appears to be a right hand (thumb on left)", + "7. Open hand with fingers spread - appears to be a left hand", + "8. Hand making \"OK\" gesture - appears to be a left hand", + "9. 
Victory/peace sign hand - appears to be a left hand", + "10. Ear or side of face (not a hand)", + "11. Hand pointing or gesturing - appears to be a right hand" + ], + "bbox": [ + 184, + 320, + 746, + 443 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "So counting the left hands, I see approximately 8 left hands. This would correspond to option (B) 8.", + "bbox": [ + 184, + 453, + 710, + 465 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "However, I want to be careful because determining left vs. right can sometimes be ambiguous in emoji or simplistic hand representations. Let me double check my count.", + "bbox": [ + 184, + 474, + 808, + 498 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Looking again, I see the thumbs down, several open hands, some gesture hands, and a few that are definitely left hands based on thumb position. The total count of left hands appears to be around 8, though there's some ambiguity with a couple of the representations.", + "bbox": [ + 184, + 508, + 808, + 542 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Based on my analysis, the answer appears to be (B) 8.", + "bbox": [ + 184, + 553, + 470, + 565 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 566, + 272, + 577 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "To solve this question, I need to examine the image carefully and identify which hands are left hands.", + "bbox": [ + 184, + 577, + 723, + 588 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Looking at the row of yellow hand emojis, I need to determine whether each is a left or right hand. For hand emojis, I can identify a left hand by noticing the thumb position - on a left hand, the thumb will typically appear on the right side when the palm is facing forward.", + "bbox": [ + 184, + 597, + 808, + 632 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Going through each emoji from left to right:", + "bbox": [ + 184, + 643, + 419, + 654 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Thumbs down - this is a left hand (thumb is on the right)", + "2. Fist - appears to be a left hand", + "3. Waving hand - this is a left hand", + "4. Pointing hand - this is a left hand", + "5. Open hand with fingers spread - this is a left hand", + "6. Fist with extended thumb - this appears to be a right hand (thumb on left)", + "7. Open hand - this is a left hand", + "8. OK gesture hand - this is a left hand", + "9. Peace sign hand - this is a left hand", + "10. Ear emoji (not a hand)", + "11. Pointing gesture - right hand" + ], + "bbox": [ + 186, + 655, + 589, + 776 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "I count 8 left hands total in the image.", + "bbox": [ + 184, + 786, + 387, + 799 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 184, + 808, + 243, + 819 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Error Category: Spatial / Orientation Error", + "text_level": 1, + "bbox": [ + 184, + 834, + 439, + 845 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Error Reason: The model makes a orientation error by misinterpreting the thumb positions in several hand emojis, leading it to incorrectly classify some right hands as left hands. For example, it counts the pointing hand in the third emoji (which is clearly a right hand) as a left hand. 
This misclassification results in an overcount of left hands.", + "bbox": [ + 184, + 847, + 808, + 895 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Algorithmic (Medium)", + "text_level": 1, + "bbox": [ + 405, + 111, + 583, + 128 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/1ab5d39c309739928dbdbad3db6a7908d5d62c04378b3c3dfc2525e0114c5e01.jpg", + "image_caption": [ + "Figure 17: A sample correct case of Algorithmic Reasoning (difficulty: Medium)." + ], + "image_footnote": [], + "bbox": [ + 207, + 133, + 356, + 250 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Question: The entire pie seen in the image is divided among several children. Each child receives a piece of pie, and each piece of pie looks identical. What is the maximum possible number of children there? Options: (A) 9 (B) 8 (C) 10 (D) 4", + "bbox": [ + 377, + 160, + 792, + 212 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Ground Truth: (B)", + "bbox": [ + 380, + 224, + 491, + 239 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: I need to determine how many equal-sized pieces this pie can be divided into. Since each child will receive an identical piece, I need to count the maximum number of equal portions visible in the image.", + "bbox": [ + 183, + 263, + 812, + 303 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Looking at the image, I can see that this appears to be an unusual-looking pie with different colored sections arranged in a circular pattern. It's not a typical pie with simple wedge-shaped slices.", + "bbox": [ + 183, + 313, + 808, + 339 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "From what I can observe, there appears to be a pattern of colored shapes (green, light green, blue, and red) arranged in a circle. To determine how many equal pieces there are, I need to identify the repeating pattern and count how many times it repeats around the circle.", + "bbox": [ + 183, + 349, + 808, + 388 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The pie seems to have a symmetrical arrangement. Looking carefully, I can see that there appear to be 8 sections around the outer ring that follow a similar pattern. I can see repeated patterns of green and light green shapes with blue and red accents between them.", + "bbox": [ + 183, + 398, + 808, + 436 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Looking at the options:", + "bbox": [ + 184, + 448, + 323, + 460 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(A) 9", + "(B) 8", + "(C) 10", + "(D) 4" + ], + "bbox": [ + 186, + 460, + 223, + 508 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Based on my observation of the image, there appear to be 8 similar sections arranged around the circle. This would suggest that option (B) 8 is the correct answer.", + "bbox": [ + 184, + 521, + 808, + 546 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "However, I should double-check by looking for any other patterns or symmetries in the image. 
It's possible that each section could be further divided, or that there's a different way to interpret the equal pieces.", + "bbox": [ + 183, + 556, + 808, + 584 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "After careful examination, I'm confident that there are 8 identical sections arranged in a circular pattern. Therefore, the maximum number of children who could receive identical pieces of pie would be 8.", + "bbox": [ + 183, + 594, + 808, + 619 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 619, + 281, + 632 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Determining the Maximum Number of Children", + "bbox": [ + 186, + 632, + 482, + 643 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "To solve this problem, I need to examine the pie in the image and determine how many identical pieces it can be divided into.", + "bbox": [ + 183, + 655, + 808, + 680 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Analysis of the Pie", + "bbox": [ + 184, + 691, + 316, + 704 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Looking at the image, I can see a circular pie with a pattern of colored sections. The key is to identify how many identical sections are present or could be created.", + "bbox": [ + 183, + 705, + 808, + 729 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Upon careful examination, I notice that the pie has a symmetrical arrangement with a repeating pattern. The colorful segments (green, light green, red, and blue shapes) appear to form a consistent pattern around the circle.", + "bbox": [ + 183, + 741, + 808, + 777 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Counting these repeating sections carefully, I can identify 8 identical portions around the circumference of the pie. Each of these sections follows the same pattern of colored shapes.", + "bbox": [ + 183, + 790, + 808, + 816 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Conclusion", + "bbox": [ + 184, + 827, + 274, + 838 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Since each child must receive an identical piece, and there are 8 identical sections visible in the pie, the maximum possible number of children would be 8.", + "bbox": [ + 183, + 839, + 808, + 864 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 184, + 875, + 250, + 887 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 344, + 47 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Algorithmic (Hard)", + "text_level": 1, + "bbox": [ + 418, + 111, + 570, + 128 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/927d08a0f0f6c9898c84461b46688bcabf55b744090097079b653255cfc913a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/090c5d20e7dbcee00cc16fc3ac0259c5d9be49d302ec9a733870fb9c076fd3e8.jpg", + "image_caption": [ + "Figure 18: A sample error case of Algorithmic Reasoning (difficulty: Hard)." + ], + "image_footnote": [], + "bbox": [ + 212, + 133, + 349, + 246 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question: There are 53 residents living in the apartment. 
How many people live on the second floor?", + "bbox": [ + 366, + 152, + 790, + 176 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Options: (A) 5 (B) 8 (C) 10 (D) 13", + "bbox": [ + 367, + 178, + 563, + 191 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Ground Truth: (A)", + "bbox": [ + 367, + 196, + 480, + 212 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: This image shows an apartment building with different sections labeled with numbers. Let me analyze what the numbers represent:", + "bbox": [ + 184, + 247, + 812, + 265 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The building appears to have multiple floors.", + "- The number 27 is shown on the left side of one section (blue).", + "- The number 23 is shown on the right side of a section (also seems to cover two blue floors).", + "- The number 18 is shown on the left side of a pink/red section marked with \"?\"", + "- The number 13 is shown on the right side of the bottom floor." + ], + "bbox": [ + 184, + 270, + 529, + 309 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The question asks how many people live on the second floor, but I need to determine which floor is the second floor and how many people live there.", + "bbox": [ + 184, + 316, + 741, + 327 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Looking at the building from bottom to top:", + "bbox": [ + 184, + 332, + 349, + 340 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Bottom floor (blue with windows): labeled with 13 on the right", + "2. Second floor (pink/red with \"?\"): labeled with 18 on the left", + "3. Two middle floors (blue with windows): part of the 23 on the right", + "4. Top floor (blue with windows): labeled with 27 on the left" + ], + "bbox": [ + 186, + 340, + 442, + 372 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "So it seems the second floor from the bottom is the one marked with $?^{\\prime \\prime}$ and has 18 on the left side. But the question asks specifically how many people live on the second floor, not how many are associated with the label.", + "bbox": [ + 184, + 378, + 812, + 396 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Let me think about this more carefully. The total number of residents is 53. The numbers shown might be:", + "bbox": [ + 184, + 402, + 578, + 412 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 27: Number of people on the top floor", + "- 23: Number of people on the two middle floors combined", + "- 18: Number of people on the second floor", + "- 13: Number of people on the bottom floor" + ], + "bbox": [ + 186, + 412, + 406, + 441 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Let's check if these add up: $27 + 23 + 18 + 13 = 81$ , which is more than 53.", + "bbox": [ + 184, + 449, + 457, + 458 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Wait, I think I'm misinterpreting. 
The numbers might represent:", + "bbox": [ + 184, + 465, + 424, + 474 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 27: People on left side (top section)", + "- 23: People on right side (upper two floors)", + "- 18: People on left side (lower section)", + "- 13: People on right side (bottom floor)" + ], + "bbox": [ + 186, + 474, + 349, + 505 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "So $27 + 18 = 45$ people on the left, and $23 + 13 = 36$ people on the right. That's still $45 + 36 = 81$ people, which doesn't match 53.", + "bbox": [ + 184, + 511, + 656, + 521 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Let me try a different approach. Maybe the 23 represents the number of people on the right side of the building (covering multiple floors), and 18 represents people on another part.", + "bbox": [ + 184, + 527, + 810, + 545 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Actually, looking at the diagram again, it appears that:", + "bbox": [ + 184, + 550, + 390, + 559 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 27: Left side of the top section", + "- 23: Right side of the top two sections", + "- 18: Left side of the bottom section", + "- 13: Right side of the bottom section" + ], + "bbox": [ + 186, + 559, + 328, + 589 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Given the total is 53 residents, and we're trying to find the number on the second floor, let's try to interpret the diagram differently.", + "bbox": [ + 184, + 597, + 671, + 607 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The first (bottom) floor seems to have 13 people.", + "bbox": [ + 184, + 612, + 367, + 621 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The top appears to have 27 people.", + "bbox": [ + 186, + 621, + 318, + 628 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "That leaves $53 - 13 - 27 = 13$ people for the second and third floors combined.", + "bbox": [ + 186, + 628, + 470, + 637 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "I'm still uncertain, but based on my calculations and the available options, it seems like (D) 13 might be the answer if we interpret the second floor as having all the remaining residents after accounting for the top and bottom floors.", + "", + "Solving the Apartment Residents Problem" + ], + "bbox": [ + 184, + 643, + 810, + 676 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Looking at the image, I need to determine how many residents live on the second floor of this apartment building.", + "bbox": [ + 184, + 681, + 611, + 691 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "I can see a diagram of a building with several numbers:", + "bbox": [ + 184, + 698, + 393, + 705 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The top section has \"27\" on the left side", + "- The middle two floors have \"23\" on the right side", + "- There's a section marked with \"18\" on the left.", + "- The bottom floor has \"13\" on the right side", + "- The second floor from the bottom is marked with a \"?\" - this is what I need to find" + ], + "bbox": [ + 186, + 705, + 495, + 746 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "I notice the question states there are 53 residents total living in the apartment building.", + "bbox": [ + 184, + 753, + 509, + 763 + ], + "page_idx": 30 + }, + { + 
"type": "text", + "text": "Let me work this out:", + "bbox": [ + 184, + 768, + 267, + 776 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The top floor appears to have 27 residents", + "- The bottom floor appears to have 13 residents", + "- That means the middle two floors together must have: ${53} - {27} - {13} = {13}$ residents" + ], + "bbox": [ + 186, + 777, + 488, + 800 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Looking closely at the image, the second floor is specifically marked with a \"?\" and there are numbers on the sides indicating 13 for that floor.", + "bbox": [ + 184, + 806, + 710, + 816 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Therefore, the second floor has 13 residents.", + "bbox": [ + 184, + 821, + 351, + 830 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Answer: D", + "bbox": [ + 186, + 838, + 228, + 845 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Error Category: Understanding Error", + "bbox": [ + 184, + 849, + 406, + 862 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Error Reason: The model makes an image understanding error by misunderstanding what is the meaning of the brackets and numbers, despite all the branching and revalidation. This misunderstanding causes it to iteratively try to understand the meaning of the numbers and ultimately select the wrong answer.", + "bbox": [ + 184, + 862, + 812, + 898 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Algorithmic (Hard)", + "text_level": 1, + "bbox": [ + 418, + 111, + 568, + 128 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/734da95c949dd5902eb030ed4246a550a978e842247fed2f79fc705914fc1123.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/85d2f65accb24a4a7760e618083811214a75b10243babb7aedceef3ce06aab19.jpg", + "image_caption": [ + "Figure 19: A sample correct case of Algorithmic Reasoning (difficulty: Hard)." + ], + "image_footnote": [], + "bbox": [ + 204, + 136, + 316, + 222 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Question: A bird can only move horizontally or vertically towards a monkey in the grid. 
The number of ways that the bird can reach the monkey is:", + "bbox": [ + 352, + 143, + 792, + 180 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Options: (A) 15 (B) 14 (C) 18 (D) 12", + "bbox": [ + 354, + 181, + 563, + 195 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Ground Truth: (A)", + "bbox": [ + 354, + 203, + 468, + 217 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Looking at the grid, I need to identify the positions of the bird and the monkey.", + "bbox": [ + 183, + 229, + 810, + 255 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The bird appears to be at the top of the grid in the second column, specifically at position $(0,1)$ if we use a zero-indexed grid counting from the top-left.", + "bbox": [ + 183, + 263, + 812, + 287 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The monkey appears to be at the bottom-right of the grid, at position $(4,3)$ using the same coordinate system.", + "bbox": [ + 184, + 297, + 766, + 310 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To count the number of ways the bird can reach the monkey, I need to consider that the bird can only move horizontally or vertically. This means the bird can only move right or down to reach the monkey from its starting position.", + "bbox": [ + 183, + 319, + 808, + 354 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The total number of valid paths will be the number of ways to arrange the required horizontal and vertical moves.", + "bbox": [ + 184, + 363, + 792, + 377 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "From position $(0,1)$ to $(4,3)$ , the bird needs to move:", + "bbox": [ + 184, + 386, + 465, + 397 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 4 steps down (from row 0 to row 4)", + "- 2 steps right (from column 1 to column 3)" + ], + "bbox": [ + 184, + 398, + 413, + 421 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "So in total, the bird needs to make $4 + 2 = 6$ moves, with 4 being vertical and 2 being horizontal.", + "bbox": [ + 183, + 430, + 692, + 443 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The number of ways to arrange these moves is given by the combination formula:", + "bbox": [ + 184, + 453, + 622, + 464 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "C(total moves, horizontal moves) = C(6, 2) = 6! / (2! * 4!) = (6 * 5) / (2 * 1) = 30 / 2 = 15", + "bbox": [ + 184, + 464, + 643, + 476 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Alternatively, you can calculate $C(6, 4)$ which would give the same result:", + "bbox": [ + 184, + 486, + 576, + 498 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nC (6, 4) = 6! / (4! * 2!) 
= 1 5\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 498, + 321, + 508 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "So there are 15 different ways for the bird to reach the monkey.", + "bbox": [ + 184, + 518, + 519, + 532 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Looking at the options, this corresponds to answer (A) 15.", + "bbox": [ + 184, + 541, + 493, + 553 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "", + "bbox": [ + 184, + 553, + 272, + 564 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Solving the Grid Path Problem", + "bbox": [ + 184, + 564, + 361, + 575 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "In this problem, I need to find the number of ways that a bird can reach a monkey by moving only horizontally or vertically in a grid.", + "bbox": [ + 183, + 585, + 812, + 609 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Looking at the image:", + "bbox": [ + 184, + 619, + 303, + 631 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The bird is located at the top of the grid in the second column (position (0,1) if we start counting from 0)", + "- The monkey is located at the bottom-right area (position (4,3))" + ], + "bbox": [ + 184, + 631, + 743, + 654 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Step 1: Determine the required moves", + "bbox": [ + 184, + 664, + 406, + 675 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "From the bird to the monkey, the bird needs to move:", + "- 4 steps down (from row 0 to row 4)", + "- 2 steps right (from column 1 to column 3)" + ], + "bbox": [ + 184, + 676, + 470, + 709 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "So the bird must make a total of 6 moves: 4 vertical and 2 horizontal.", + "bbox": [ + 184, + 719, + 550, + 731 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "## Step 2: Calculate the number of possible paths", + "bbox": [ + 184, + 741, + 450, + 753 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "This is a classic combinatorial problem. The number of different ways to arrange these moves is:", + "bbox": [ + 184, + 753, + 697, + 765 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "C(total moves, horizontal moves) = C(6,2) = 6! / (2! 
$\\times$ 4!)", + "bbox": [ + 184, + 775, + 472, + 787 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} = (6 \\times 5) / (2 \\times 1) \\\\ = 3 0 / 2 \\\\ = 1 5 \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 775, + 472, + 818 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Alternatively, we could calculate $\\mathrm{C}(6,4)$ which gives the same result.", + "bbox": [ + 184, + 830, + 547, + 843 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "## Step 3: Verify with the given options", + "bbox": [ + 184, + 853, + 398, + 864 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Our answer of 15 matches option (A).", + "bbox": [ + 184, + 864, + 387, + 876 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Answer: A", + "bbox": [ + 186, + 886, + 246, + 897 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Analogical (Easy)", + "text_level": 1, + "bbox": [ + 426, + 112, + 563, + 128 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/e518d4acbe9c161cd715fed617ffb95611fdb16cf38907210ed5d5f595521048.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/50a2de794b432270d8a6b8a4ca03979f06543861e5246d77de2f2322303eec57.jpg", + "image_caption": [ + "Figure 20: A sample error case of Analogical Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 205, + 136, + 243, + 167 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/a77154c58642491a23c268c4034f10ad4af28ba62a0d416a6bd8022d2dd274b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 205, + 167, + 241, + 195 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/cb35d65666a6596b675ccdc24c9b37f588de9281849111d31cf59dc8561dd8a1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 248, + 137, + 285, + 166 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/22e8f2fd45414ea0c02565acbba730dd18db188566730e74d38f7073ac825006.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 251, + 167, + 284, + 195 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/2c357401c5509f0dfd36a58990216a6180a467958210780c06b3d61a2f2e3160.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 137, + 326, + 166 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/37364e6ffedfd53d5cf11984921f5c210b210bd629162fe84fa6477d697c9de7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 289, + 172, + 303, + 183 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/0920247e737549ac6c013dd4ad786a1d0456780992c80a82459f78cbac53edd4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 172, + 321, + 183 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/df192506b0a870a0c7543522cc6648f7a5cebc30bdfbff972c7e7ef764491a13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 187, + 203, + 217, + 228 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/27c1ab9e19183598421b47bc2b2cde9cfa0eca5758064122469fefc4dd9aa1ec.jpg", + "image_caption": [], + 
"image_footnote": [], + "bbox": [ + 228, + 200, + 243, + 228 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/b9bd0cb26aec3f688bb02c915e6e3c6a947257aafe514b16d19b7342827dc8a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 248, + 200, + 284, + 228 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/eb994d835f7c12f06003e9f62bc37518d1ff14925e962e33d938b47b4b9b5b19.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 200, + 303, + 228 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/d16e3f610ac02e622ac34890433e4efd1957e013f904088e7fb2cd385a5816c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 316, + 200, + 349, + 228 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Question: Given the pattern in the first line of blocks, which option at the bottom of the image fits in the question mark in the second line of blocks?", + "bbox": [ + 367, + 152, + 794, + 189 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 367, + 196, + 480, + 210 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the first line of blocks and see how it relates to the second line of blocks.", + "bbox": [ + 184, + 234, + 723, + 244 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "First line of blocks:", + "bbox": [ + 184, + 251, + 259, + 258 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "1. The first block has white on the left and blue on the right", + "bbox": [ + 186, + 260, + 406, + 267 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "2. The second block has white on the top and blue on the", + "bbox": [ + 186, + 268, + 401, + 276 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "3. The third block has blue on the left and white on the right", + "bbox": [ + 186, + 276, + 410, + 284 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Second line of blocks:", + "bbox": [ + 186, + 291, + 269, + 297 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The first block has black on the top and yellow on the bottom", + "2. The second block has yellow on the left and black on the right", + "3. 
The third block has a question mark (this is what we need to figure out)" + ], + "bbox": [ + 186, + 299, + 460, + 321 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Let me think about the possible relationships between the first and second line:", + "bbox": [ + 184, + 329, + 480, + 338 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Possibility 1: Color transformation", + "bbox": [ + 184, + 345, + 316, + 352 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Blue in the first line becomes black in the second line", + "- White in the first line becomes yellow in the second line" + ], + "bbox": [ + 186, + 354, + 400, + 369 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Possibility 2: Position pattern", + "bbox": [ + 184, + 376, + 297, + 385 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The first line blocks follow a pattern of right/bottom/left coloring for the blue part.", + "bbox": [ + 186, + 385, + 498, + 393 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The second line blocks might follow the same positional pattern but with different colors.", + "bbox": [ + 186, + 393, + 519, + 400 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Based on the color transformation pattern I identified (blue→black, white→yellow), and looking at the third block in the first line (blue left, white right), the answer should be a block with black on the left and yellow on the right.", + "bbox": [ + 186, + 400, + 810, + 416 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Looking at the options:", + "bbox": [ + 186, + 422, + 274, + 431 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Option A: Black on top, yellow on bottom (like the first block in the second row)", + "- Option B: White on left, pink/red on right (doesn't match our color scheme)", + "- Option C: Yellow on top, black on bottom (this would be inverse of first block in second row)", + "- Option D: Blue on bottom, white on top (similar to the second block in the first row)" + ], + "bbox": [ + 186, + 431, + 537, + 462 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "None of the options exactly match what I'm looking for. The relationship between the first line and second line might be more complex. 
Let me look at it again:", + "bbox": [ + 184, + 469, + 776, + 479 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "First block: white left, blue right $\\rightarrow$ black top, yellow bottom", + "bbox": [ + 184, + 484, + 413, + 494 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Second block: white top, blue bottom $\\rightarrow$ yellow left, black right", + "bbox": [ + 184, + 494, + 421, + 502 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let me try to formulate a rule:", + "-White areas become yellow", + "- Blue areas become black", + "- Position changes: left→top, right→bottom, top→left, bottom→right" + ], + "bbox": [ + 186, + 508, + 444, + 540 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "So if the third block has blue left and white right, then the third block in the second row should have black top and yellow bottom according to this rule.", + "bbox": [ + 184, + 547, + 751, + 556 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Option A matches this pattern.", + "bbox": [ + 184, + 563, + 300, + 571 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Let me double-check:", + "bbox": [ + 186, + 579, + 267, + 585 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "First rule pattern: white→yellow, blue→black", + "bbox": [ + 186, + 587, + 357, + 594 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Position transformation: left $\\rightarrow$ top, right $\\rightarrow$ bottom, top $\\rightarrow$ left, bottom $\\rightarrow$ right", + "bbox": [ + 186, + 594, + 465, + 603 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "For block 3 in row 1: blue on left, white on right", + "bbox": [ + 186, + 609, + 364, + 617 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Applying the rules: blue left becomes black top, white right becomes yellow bottom", + "bbox": [ + 186, + 618, + 500, + 625 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "This gives us: black top, yellow bottom", + "bbox": [ + 186, + 626, + 334, + 633 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Option A has black on top and yellow on the bottom, so it seems to be the correct answer.", + "bbox": [ + 186, + 633, + 517, + 641 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 642, + 246, + 648 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "To solve this problem, I need to identify the pattern in the first line of blocks and how it transforms in the second line.", + "bbox": [ + 186, + 650, + 622, + 657 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Let me analyze both rows carefully:", + "bbox": [ + 186, + 664, + 320, + 672 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "First row:", + "bbox": [ + 186, + 680, + 223, + 686 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "- Left block: White on left, blue on right", + "bbox": [ + 186, + 688, + 334, + 695 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "- Middle block: White on top, blue on bottom", + "bbox": [ + 186, + 696, + 356, + 703 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "-Right block:Blue on left, white on right", + "bbox": [ + 186, + 704, + 339, + 710 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Second row:", + "bbox": [ + 186, + 719, + 233, + 724 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Left block: Black on top, yellow on bottom", + "- 
Middle block: Yellow on left, black on right", + "- Right block: ? (what we need to determine)" + ], + "bbox": [ + 186, + 727, + 352, + 750 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Looking at the transformation from first to second row:", + "bbox": [ + 186, + 758, + 393, + 765 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The blue areas in the first row seem to become black in the second row", + "2. The white areas in the first row seem to become yellow in the second row", + "3. There's also a positional transformation:", + "- Left side $\\rightarrow$ Top position" + ], + "bbox": [ + 186, + 766, + 468, + 797 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "-Right side $\\rightarrow$ Bottom position", + "-Top position $\\rightarrow$ Left side", + "- Bottom position $\\rightarrow$ Right side" + ], + "bbox": [ + 192, + 797, + 310, + 819 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Option A shows black on top and yellow on bottom, which matches our expected pattern.", + "bbox": [ + 186, + 820, + 519, + 828 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 835, + 227, + 843 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "text_level": 1, + "bbox": [ + 184, + 849, + 377, + 861 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Error Reason: The model gets this question wrong because it focuses too much on individual color mappings or partial position patterns rather than identifying the full transformation. The correct pattern involves a consistent clockwise rotation of color positions from the first to the second row.", + "bbox": [ + 184, + 861, + 810, + 897 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Analogical (Easy)", + "text_level": 1, + "bbox": [ + 426, + 112, + 563, + 128 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/61cad466b49690891b856ba14cc70440f36a35be4a0a036b722eeab6b87a80a2.jpg", + "image_caption": [ + "Figure 21: A sample correct case of Analogical Reasoning (difficulty: Easy)." 
+ ], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/10bed72f87c8e193995a37a5c5e14a781a3051a3d0f0558297a496509e6fabfd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 184, + 138, + 437, + 181 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/d7598ff388d40ce2b7daff4e08614c80aa41b7b49ae51fea453b622c3c9556ad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 186, + 200, + 406, + 234 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/a60ad6a9cc833737461a840efa4f6a01f8ba6ccbb0a93d01b817ef50dc45e361.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 244, + 292, + 289 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/ecf21b7d9cbd5dd7fe914185b4dd246cda08fcb78323f43b632ec0f01026af01.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 330, + 242, + 383, + 284 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/56840636c47c1f5df7225bd5590c34bae5d99f28eed1a5afd5b2806ace902ec9.jpg", + "image_caption": [ + "C" + ], + "image_footnote": [], + "bbox": [ + 225, + 310, + 287, + 342 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/b93bf93c475a4531b9fbb26b1242800aad6dc72e38106580e5a55971721ed1bd.jpg", + "image_caption": [ + "D" + ], + "image_footnote": [], + "bbox": [ + 330, + 310, + 406, + 343 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Question: Given the analogy in the first line of items, which choice fits best in the black question mark?", + "bbox": [ + 454, + 213, + 790, + 239 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 455, + 247, + 568, + 261 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let's analyze the analogy in the first line:", + "bbox": [ + 184, + 357, + 586, + 369 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The first line shows sliced bread on the left and a knife on the right, connected by an arrow.", + "bbox": [ + 184, + 377, + 622, + 388 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The relationship here appears to be: the knife is a tool used to cut/slice the bread. 
The knife is what transforms a whole loaf of bread into slices.", + "bbox": [ + 183, + 397, + 812, + 417 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Now, let's look at the second line:", + "bbox": [ + 184, + 426, + 346, + 438 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "- On the left, we see a log (a section of tree trunk)", + "bbox": [ + 186, + 438, + 421, + 448 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "- On the right, we need to determine which tool would be used to process or cut this log", + "bbox": [ + 186, + 448, + 607, + 459 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Let's examine each option:", + "bbox": [ + 186, + 468, + 315, + 478 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(A) A wooden shed or small building", + "(B) A cleaver or butcher's knife", + "(C) An axe", + "(D) Sliced butter on a wooden board" + ], + "bbox": [ + 186, + 478, + 366, + 518 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The question is asking which tool would be used to process a log, similar to how a knife is used to slice bread.", + "bbox": [ + 184, + 527, + 710, + 539 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Option A (wooden shed) is not a tool but a structure that might be built using processed wood", + "- Option B (cleaver/butcher's knife) is a cutting tool but not typically used for logs", + "- Option C (axe) is a tool specifically designed for cutting logs/wood", + "- Option D (sliced butter) is not a tool at all, but a food product" + ], + "bbox": [ + 184, + 547, + 643, + 589 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Following the analogy \"bread $\\rightarrow$ knife\" (something that needs to be cut $\\rightarrow$ tool used to cut it), the logical completion for \"log $\\rightarrow$ ?\" would be an axe, which is a tool specifically designed to cut wood.", + "bbox": [ + 183, + 597, + 810, + 619 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Therefore, the answer appears to be (C) the axe.", + "bbox": [ + 184, + 627, + 415, + 638 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 638, + 264, + 648 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "I need to determine the relationship between the items in the first line and then apply the same relationship to find the missing item in the second line.", + "bbox": [ + 186, + 648, + 810, + 667 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Looking at the first line:", + "bbox": [ + 184, + 678, + 303, + 688 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- On the left, there's a loaf of bread with some slices cut from it", + "- On the right, there's a knife", + "- The relationship is that the knife is the tool used to cut/slice the bread" + ], + "bbox": [ + 186, + 688, + 527, + 718 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Now, for the second line:", + "bbox": [ + 184, + 727, + 308, + 737 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- On the left, there's a log (a section of tree trunk)", + "- On the right, I need to find the appropriate tool that would be used to cut or process this log" + ], + "bbox": [ + 186, + 738, + 633, + 758 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Let me analyze each option:", + "bbox": [ + 184, + 767, + 323, + 777 + ], + "page_idx": 33 + }, + 
{ + "type": "list", + "sub_type": "text", + "list_items": [ + "(A) A wooden shed/small building - This is a structure that might be built using processed wood, not a tool for cutting logs", + "(B) A cleaver/butcher's knife - While this is a cutting tool, it's not typically used for logs", + "(C) An axe - This is a tool specifically designed for chopping and cutting logs/wood", + "(D) Sliced butter on a board - This is a food product, not a tool for processing logs" + ], + "bbox": [ + 186, + 777, + 776, + 819 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The pattern is \"material $\\rightarrow$ tool used to cut that material\"", + "bbox": [ + 184, + 827, + 457, + 838 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "-Bread $\\rightarrow$ Knife", + "- $\\mathrm{Log}\\rightarrow ?$" + ], + "bbox": [ + 186, + 838, + 264, + 858 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "An axe is the tool specifically designed for cutting logs, just as a knife is the tool for slicing bread.", + "bbox": [ + 184, + 867, + 650, + 878 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Answer: C", + "bbox": [ + 186, + 887, + 240, + 897 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Analogical (Medium)", + "text_level": 1, + "bbox": [ + 410, + 111, + 578, + 128 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/b175172cabef3457d3fbf51009db02477dbf908a85f4f7569d8011b8942a5875.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 591, + 112, + 612, + 126 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/b7d7d1a6defad214c382f124a73a41a9d9160dcaaf744d712a10ca291058ec55.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 196, + 133, + 241, + 189 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/d20318dee2d84b8a10a978ba5d30a8fbe102063bc9651d8ef76d1f77c1d7ef91.jpg", + "image_caption": [ + ":" + ], + "image_footnote": [], + "bbox": [ + 253, + 138, + 323, + 184 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/c92374dde78af9c0322744c19117f204b609d167b4de3d59228ed153f09a6a03.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 138, + 408, + 184 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/7ee0bf8793fb3d09c02768e277095ab8284e9064c0e822e409949164f9d52fe8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 135, + 473, + 188 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/34dd8ccdd69dd7fd4e38f6df7a7500a4d561d79b031d7cf70a372c6481e767f5.jpg", + "image_caption": [ + "Figure 22: A sample error case of Analogical Reasoning (difficulty: Medium)." 
+ ], + "image_footnote": [], + "bbox": [ + 200, + 210, + 240, + 256 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/784558a7f58d3124ed539611b33a54af63298c5695a8d03677d909c7112a73fd.jpg", + "image_caption": [ + "Candidates" + ], + "image_footnote": [], + "bbox": [ + 266, + 210, + 320, + 256 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/5c75fc66ff632914871c1c14e47d85c5dbae9791373244a7835cb9bafd5595e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 210, + 403, + 256 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/2d8c23c6642e4fdc059fce954cbe74392aa42e50ac339d36ec570b4538d1b9cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 210, + 467, + 256 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy.", + "bbox": [ + 490, + 164, + 790, + 202 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 491, + 209, + 604, + 223 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Clae3-7-Sonnt-Thinkin: thinkin> h i th f th t, and then identify which candidate option (A, B, C, or D) best completes the right side of the equation when paired with \"Origini\".", + "bbox": [ + 184, + 262, + 812, + 280 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Let's analyze the images:", + "bbox": [ + 184, + 286, + 281, + 295 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Left side:", + "bbox": [ + 184, + 301, + 222, + 309 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Origin: A person installing or working on what appears to be a large appliance or system", + "bbox": [ + 186, + 310, + 524, + 318 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Analogy: A person installing or repairing what looks like a garage door opener or ceiling-mounted device", + "bbox": [ + 186, + 319, + 584, + 327 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Right side:", + "bbox": [ + 186, + 333, + 228, + 340 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Origin: A person's hand operating what appears to be a sound mixing board or control panel with sliders and buttons", + "bbox": [ + 186, + 340, + 630, + 349 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Analogy': This is what we need to determine from options A, B, C, or D", + "bbox": [ + 186, + 349, + 457, + 357 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The relationship between Origin and Analogy seems to be:", + "bbox": [ + 186, + 364, + 405, + 372 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Both involve installation, repair, or maintenance of mechanical/electrical systems, but in different contexts. 
They show people actively working on or operating different types of equipment or machinery.", + "bbox": [ + 186, + 372, + 810, + 388 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Now, let's look at the candidate options and see which one best represents the same relationship with Origin' (the sound mixing board):", + "bbox": [ + 184, + 395, + 689, + 404 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "A: A person painting or working on a ceiling/wall", + "bbox": [ + 186, + 411, + 375, + 419 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "B: A close-up of someone working on or repairing what appears to be electronic components", + "bbox": [ + 186, + 419, + 531, + 426 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "C: Some mechanical or electronic equipment with wiring", + "bbox": [ + 186, + 428, + 398, + 436 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "D: People moving or installing what looks like a home appliance (possibly a refrigerator or washer/dryer)", + "bbox": [ + 186, + 436, + 581, + 444 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "I need to determine which option shows a relationship to the sound mixing board (Origin') that parallels the relationship between the first two images.", + "bbox": [ + 186, + 450, + 746, + 459 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "If Origin shows someone installing/repairing an appliance, and Analogy shows someone installing/repairing a different type of equipment (garage door opener), then the relationship is about installation/repair of different types of systems.", + "bbox": [ + 184, + 465, + 810, + 482 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Similarly, if Origin' shows someone operating a sound mixing board, then Analogy' should show someone operating a different type of equipment or control system.", + "bbox": [ + 184, + 488, + 799, + 498 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Looking at the options:", + "bbox": [ + 186, + 503, + 274, + 512 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "A: Shows installation/repair work (similar to Origin/ Analogy) but not operation of controls", + "bbox": [ + 186, + 512, + 529, + 521 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "B: Shows repair/maintenance of electronic components", + "bbox": [ + 186, + 522, + 650, + 530 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "C: Shows equipment but not clearly someone operating it.", + "bbox": [ + 186, + 530, + 401, + 537 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "D: Shows people moving/installing an appliance", + "bbox": [ + 186, + 537, + 370, + 544 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The key relationship between Origin and Analogy seems to be: a person actively working on installing or maintaining a system. Similarly, Origin' shows a person", + "bbox": [ + 186, + 551, + 808, + 559 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "actively operating a control system (mixing board)", + "bbox": [ + 186, + 559, + 374, + 568 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Option B seems closest as it shows hands working on electronic components, which could be considered operating or maintaining another type of electronic system. 
Though it's not entirely clear from the image, this appears to be the closest match to the relationship pattern.", + "bbox": [ + 186, + 574, + 810, + 590 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 590, + 246, + 598 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Analogy Analysis", + "bbox": [ + 186, + 599, + 261, + 606 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "I need to understand the relationship between \"Origin\" and \"Analogy\" in the first pair, then apply that same relationship to find what completes the \"Origin\" and \"Analogy\" pair.", + "bbox": [ + 184, + 613, + 808, + 630 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Examining the first pair:", + "bbox": [ + 186, + 636, + 290, + 643 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*Origin\\*\\*: Shows a person installing or repairing what appears to be a large appliance (possibly a washer or dryer)", + "bbox": [ + 186, + 643, + 620, + 652 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*Analogy\\*\\*: Shows a person installing or working on a ceiling-mounted device (appears to be a garage door opener)", + "bbox": [ + 186, + 652, + 625, + 661 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The relationship seems to be: Both involve installation/repair/maintenance of household mechanical systems, but in different locations and of different types.", + "bbox": [ + 184, + 667, + 772, + 676 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Examining the second pair:", + "bbox": [ + 186, + 684, + 302, + 691 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*Origin\\*\\*: Shows hands operating a sound mixing board or audio control panel with sliders and buttons", + "bbox": [ + 186, + 691, + 584, + 699 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*Analogy\\*\\*: This is what we need to determine", + "bbox": [ + 186, + 699, + 372, + 707 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "I need to find an option that shows someone working with or operating another type of electronic/technical equipment, maintaining the same relationship as the first pair.", + "bbox": [ + 184, + 714, + 810, + 731 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Analyzing the options:", + "bbox": [ + 186, + 738, + 285, + 744 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*A\\*\\*: Person painting or installing something on a ceiling", + "bbox": [ + 186, + 746, + 408, + 753 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*B\\*\\*: Close-up of hands working on electronic components/circuit board", + "bbox": [ + 186, + 753, + 465, + 762 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*C\\*\\*: Complex mechanical/electronic equipment with wiring", + "bbox": [ + 186, + 762, + 424, + 768 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- \\*\\*D\\*\\*: Two people moving or installing what appears to be a refrigerator or appliance", + "bbox": [ + 186, + 768, + 509, + 777 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The relationship we're looking for should show hands working on/operating a different type of technical equipment than a mixing board.", + "bbox": [ + 186, + 784, + 699, + 792 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Option B shows hands working directly with electronic 
components/circuit board, which parallels how the hands in Origin are working directly with the audio equipment. Both involve hands-on technical work with electronic systems, just different types.", + "bbox": [ + 184, + 800, + 810, + 816 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 823, + 227, + 830 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "text_level": 1, + "bbox": [ + 186, + 835, + 377, + 847 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Error Reason: The model gets the answer wrong because it overemphasizes the type of equipment being worked on (mechanical vs. electronic) rather than the specific action and interface involved in the analogy. Option C correctly mirrors the using / fixing relationship seen in the first pair of analogy, while Option B lacks this direct fixing interaction, despite a similar technical context.", + "bbox": [ + 184, + 848, + 810, + 897 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Analogical (Medium)", + "text_level": 1, + "bbox": [ + 410, + 111, + 578, + 128 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/257c05296d2ad268a2d7c6e59b7a42d0258eec902ec9535ec6137f13a615e6da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/ab47e0324a8f3696477f82c394854dd355acec65de459490a80c26dc69f8f9ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 143, + 264, + 196 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/7f678c3abec399cf19f625891724b356cd35e119f8affb3fdaca193802247dc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 266, + 142, + 349, + 198 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/85fc0acebdc041898bc46ceeeb24682fb59f7759acc9e281093e3a19ec0fdb83.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 143, + 454, + 196 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/ad4b61a98884381daa9f1303c0f51f059c4fb92172fa528912856cb90462826f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 454, + 135, + 540, + 205 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/0c9852c365df3b1611c71ccabfed0cc26a574ebf7515ee1488d453c71195d0e9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 234, + 269, + 286 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/0ee04f55113fc5aace0ec41a462c21b3615295a732c880c148e7b6f41902a4d8.jpg", + "image_caption": [ + "Candidates" + ], + "image_footnote": [], + "bbox": [ + 285, + 232, + 343, + 290 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/610adf6221d802cd88a3f2515ae82304ccb0bf5468106f467cd6d6c333f8fa7f.jpg", + "image_caption": [ + "Figure 23: A sample correct case of Analogical Reasoning (difficulty: Medium)." 
+ ], + "image_footnote": [], + "bbox": [ + 359, + 233, + 447, + 287 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/e8a8f856fd35aa28d63335409c3b7e168800d1baf1d9d81477bd486902989eda.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 450, + 234, + 537, + 287 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy.", + "bbox": [ + 547, + 176, + 810, + 228 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Ground Truth: (B)", + "bbox": [ + 549, + 237, + 661, + 250 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze the relationship between the \"Origin\" and \"Analogy\" images to determine the pattern, and then apply that to find the best match for \"Origin\" from the candidates.", + "bbox": [ + 184, + 301, + 808, + 327 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "In the first pair:", + "bbox": [ + 184, + 335, + 271, + 348 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Origin: A dog (looks like a spaniel or similar breed) searching/sniffing in grass", + "- Analogy: What appears to be a small animal (possibly a rodent) in its natural habitat with vegetation" + ], + "bbox": [ + 184, + 348, + 728, + 369 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "In the second pair:", + "bbox": [ + 184, + 380, + 287, + 392 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Origin': A dog (appears to be a small white dog) eating or drinking from a bowl/dish", + "- Analogy': We need to determine this" + ], + "bbox": [ + 184, + 392, + 645, + 414 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The relationship seems to be showing an animal (dog) in the Origin image, then showing what might be a prey animal or similar smaller animal in a related natural context in the Analogy image.", + "bbox": [ + 184, + 424, + 808, + 449 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "So, for Origin' which shows a dog eating/ drinking, the Analogy' should show some smaller animal also eating.", + "bbox": [ + 184, + 457, + 777, + 470 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Looking at the candidates:", + "bbox": [ + 184, + 479, + 328, + 492 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: Shows a dog with what appears to be a stick or something in its mouth in snow.", + "B: Shows a mouse or small rodent eating something", + "C: Shows what looks like a groundhog or marmot near flowers/plants", + "D: Shows another dog's face/profile" + ], + "bbox": [ + 184, + 492, + 624, + 537 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Based on the pattern (dog $\\rightarrow$ smaller animal in natural context, especially related to the activity the dog is doing), option B (the mouse/rodent eating) would best complete the analogy since it shows a smaller animal doing the same activity (eating) as the dog in Origin!", + "bbox": [ + 184, + 547, + 810, + 580 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "", + "bbox": [ + 184, + 580, + 272, + 592 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "I need to determine the relationship between the \"Origin\" and \"Analogy\" images, and then apply that same relationship to find the appropriate \"Analogy\"\" that matches 
\"Origin\"\"", + "bbox": [ + 184, + 592, + 810, + 614 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Looking at the first pair:", + "bbox": [ + 184, + 625, + 316, + 637 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Origin: A dog searching or sniffing in grass (appears to be hunting or tracking)", + "- Analogy: A small animal (looks like a rodent) in its natural habitat among vegetation" + ], + "bbox": [ + 184, + 637, + 643, + 659 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Now looking at the second pair:", + "bbox": [ + 184, + 669, + 357, + 680 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Origin: A dog eating or drinking from a bowl/dish", + "- Analogy': This is what I need to determine" + ], + "bbox": [ + 186, + 681, + 467, + 703 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The relationship between the first pair seems to be: a dog engaged in a natural behavior (hunting/tracking), followed by what could be its potential prey or a similar smaller animal in its natural environment.", + "bbox": [ + 184, + 713, + 810, + 737 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Following this pattern, the \"Analogy\" should show a smaller animal engaged in eating or drinking (similar to what the dog is doing in \"Origin\").", + "bbox": [ + 184, + 746, + 810, + 771 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Examining the candidates:", + "bbox": [ + 184, + 780, + 328, + 792 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: A dog with something in its mouth on snow", + "B: A mouse/small rodent eating food", + "C: What appears to be a groundhog or marmot among vegetation/flowers", + "D: A profile view of another dog" + ], + "bbox": [ + 186, + 792, + 581, + 837 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Since the pattern is showing a smaller animal doing a similar activity as the dog, option B (the mouse eating) best completes the analogy because it shows a smaller animal engaged in the same activity (eating) as the dog in Origin!", + "bbox": [ + 184, + 847, + 810, + 871 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 184, + 878, + 243, + 891 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Analogical (Hard)", + "text_level": 1, + "bbox": [ + 423, + 111, + 565, + 128 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/783d6eab2c018327ef49d4996b35460aeb2b724d7e4d20b7f7afa306007e0475.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/37189ecbd81689a89efc1f56cd6a8ccc393d42226fa9319b2192c3ead6b3c5f1.jpg", + "image_caption": [ + "Figure 24: A sample error case of Analogical Reasoning (difficulty: Hard)." + ], + "image_footnote": [], + "bbox": [ + 178, + 148, + 406, + 226 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. 
The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures.", + "bbox": [ + 411, + 140, + 812, + 215 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Ground Truth: (A)", + "text_level": 1, + "bbox": [ + 413, + 224, + 527, + 238 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze this question carefully. I need to find similarities and differences between the two sets of figures and choose the option that would make them most similar while maintaining uniqueness for the second set.", + "bbox": [ + 184, + 247, + 810, + 265 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The first set of figures consists of:", + "bbox": [ + 184, + 271, + 312, + 279 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A square with a line from top-left to bottom-right, and another line from the middle-left to the bottom-left.", + "2. A square with zigzag lines.", + "3. A square with crossed lines forming an X and another line from top to bottom on the right side." + ], + "bbox": [ + 186, + 280, + 589, + 303 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The second set of figures consists of:", + "bbox": [ + 184, + 310, + 323, + 316 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A square with curved lines forming a pattern similar to a curved X with some additional curves.", + "2. A square with a circle and curved lines inside.", + "3. The question mark - this is what I need to determine." + ], + "bbox": [ + 186, + 319, + 550, + 342 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Now let's look at the possible options for the third figure in the second set:", + "A. A square with curved semi-circles on the left and right sides", + "B. A square with leaf-like shapes in the four corners.", + "C. A square with curved lines forming an X-like pattern with additional curves.", + "D. A square with star-like pattern created by curved lines." + ], + "bbox": [ + 184, + 349, + 482, + 388 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "To find the pattern, I need to look for similarities between the first set and second set:", + "bbox": [ + 184, + 395, + 504, + 404 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Comparing the first figures of each set:", + "bbox": [ + 184, + 411, + 331, + 417 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- First set: Square with straight lines", + "- Second set: Square with curved lines" + ], + "bbox": [ + 186, + 420, + 328, + 435 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Comparing the second figures of each set:", + "- First set: Square with zigzag straight lines", + "- Second set: Square with a circle and curved lines" + ], + "bbox": [ + 184, + 441, + 372, + 465 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "I notice that the second set seems to be using curved lines where the first set uses straight lines. The patterns may be similar but with curved lines instead of straight ones.", + "Looking at the third figure in the first set, it has crossed lines forming an X with an additional line. 
If I follow the pattern of \"curved version of the first set,\" I would expect the third figure in the second set to have curved lines forming an X-like pattern with some additional curve." + ], + "bbox": [ + 184, + 473, + 810, + 513 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Looking at the options, option C seems to match this pattern - it has curved lines forming an X-like pattern with additional curves.", + "bbox": [ + 184, + 518, + 671, + 529 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "However, I should also check if there's another pattern I'm missing:", + "bbox": [ + 184, + 536, + 437, + 545 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Another possibility is that there's a progression across each row. In the first set, we see increasing complexity or number of lines. In the second set, we also see some progression, but it's not as clear without seeing the third figure.", + "bbox": [ + 184, + 551, + 810, + 569 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Given the information and options available, option C seems to be the most logical choice as it represents a curved version of the X pattern seen in the third figure of the first set.", + "bbox": [ + 184, + 574, + 810, + 590 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "", + "I need to analyze both sets of figures to find patterns and determine which option would complete the second set." + ], + "bbox": [ + 186, + 590, + 609, + 607 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "First, let me examine both sets of figures:", + "bbox": [ + 184, + 613, + 339, + 623 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "**First Set (Top Row):**", + "bbox": [ + 186, + 628, + 274, + 637 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Square with straight lines forming an angle in the bottom left", + "2. Square with zigzag diagonal lines", + "3. Square with straight lines forming an X plus a vertical line on the right" + ], + "bbox": [ + 186, + 638, + 457, + 661 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "**Second Set (Middle Row):**", + "bbox": [ + 186, + 667, + 297, + 676 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Square with curved lines forming what looks like a distorted X", + "2. Square with a circle and curved lines", + "3. 
Question mark - to be determined" + ], + "bbox": [ + 186, + 676, + 429, + 699 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "**Key Observation:** The second set appears to use curved lines to create patterns similar to the straight-line patterns in the first set.", + "bbox": [ + 184, + 705, + 676, + 715 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Looking at the relationship:", + "- First figure in set 1 has straight angles; first figure in set 2 has curved versions", + "- Second figure in set 1 has zigzag lines; second figure in set 2 has curved lines with a circle" + ], + "bbox": [ + 186, + 722, + 524, + 746 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "For the third position, I should look for a figure that uses curved lines to represent the X-pattern plus vertical line from the third figure in set 1.", + "bbox": [ + 184, + 753, + 715, + 763 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Looking at the options:", + "bbox": [ + 186, + 768, + 274, + 777 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Option A: Curved lines forming semi-circles on left and right", + "- Option B: Curved lines forming leaf-like shapes in corners", + "- Option C: Curved lines forming an X-like pattern with additional curves", + "- Option D: Curved lines forming a star-like pattern" + ], + "bbox": [ + 186, + 777, + 460, + 809 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Option C seems most appropriate as it shows curved lines forming an X-like pattern, similar to how the third figure in the first set has straight lines forming an X. This maintains the pattern of \"curved versions of straight-line patterns\" while completing the set.", + "bbox": [ + 184, + 815, + 810, + 833 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Answer: C", + "bbox": [ + 186, + 839, + 228, + 847 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "bbox": [ + 184, + 851, + 379, + 864 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Error Reason: The model incorrectly chooses (C) because it overly focuses on surface-level visual similarity (the X-like shape) without accounting for the systematic transformation from the first row of blocks to the second row of blocks.", + "bbox": [ + 184, + 864, + 810, + 898 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Analogical (Hard)", + "text_level": 1, + "bbox": [ + 423, + 112, + 565, + 128 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/56af6eb71d6e2339d73f592d622135add9ce4337cf2f468cd37ce3f84689bcee.jpg", + "image_caption": [ + "Figure 25: A sample correct case of Analogical Reasoning (difficulty: Hard)." 
+ ], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/f2b1736102b305edc25f255215e05f78a8d01fa59b7ea9760565a410ccf255c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 138, + 403, + 170 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/b8c7c0234db97446c1b2e4d704709d806fe90a0242a41fc9f438630674fb830f.jpg", + "image_caption": [ + "A B C D" + ], + "image_footnote": [], + "bbox": [ + 222, + 172, + 362, + 204 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures.", + "bbox": [ + 411, + 133, + 812, + 209 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "text_level": 1, + "bbox": [ + 413, + 210, + 524, + 224 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me carefully analyze this problem.", + "bbox": [ + 184, + 229, + 488, + 239 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "We have two sets of figures:", + "- First set: Three figures in a row", + "- Second set: Two figures and a question mark" + ], + "bbox": [ + 184, + 246, + 359, + 270 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "I need to find the best option (A, B, C, or D) to replace the question mark, such that the two sets become most similar while preserving the uniqueness of the second set.", + "bbox": [ + 184, + 276, + 808, + 286 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "First, let me analyze the first set:", + "bbox": [ + 184, + 292, + 308, + 301 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. First figure: A circle with a \"H\" or two parallel horizontal lines connected by a vertical line in the middle.", + "2. Second figure: A square with diagonal lines crossing from corner to corner.", + "3. Third figure: A simple circle." + ], + "bbox": [ + 186, + 301, + 583, + 324 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Now, let me analyze the second set (before the question mark):", + "bbox": [ + 184, + 330, + 421, + 339 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. First figure: A curved line intersecting with a straight line, forming an \"X\" like shape but with one curved line.", + "2. Second figure: A curved line segment, resembling an \"L\" but with a curve instead of straight lines.", + "3. Question mark: This is what we need to figure out." + ], + "bbox": [ + 186, + 340, + 602, + 364 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Let's look at the options:", + "bbox": [ + 184, + 369, + 279, + 378 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: Two crossing lines with a small curve at one end of one line.", + "B: Two curved lines crossing in an \"X\" for", + "C: A circle with two perpendicular lines crossing it, forming four equal segments.", + "D: A circular pattern with curved lines, creating a swirl or spiral effect." 
+ ], + "bbox": [ + 186, + 378, + 488, + 410 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Now, let me try to identify the pattern or relationship between the two sets:", + "bbox": [ + 184, + 417, + 467, + 426 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "In the first set, we start with a circle containing a shape (H), then move to a square with crossed lines, and finally a simple circle.", + "bbox": [ + 184, + 431, + 661, + 441 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "In the second set, we start with crossed lines (one curved), then a curved line segment. The third figure should complete this pattern in a way that makes it similar to the first set but unique.", + "bbox": [ + 184, + 448, + 810, + 465 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Looking at the transformation from first set to second set:", + "bbox": [ + 184, + 470, + 403, + 479 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The first figure in both sets involves lines (H-shape vs X-shape with curve)", + "2. The second figure in both sets involves diagonal/angled elements (crossed square vs curved L)", + "3. The third figure in the first set is a simple circle" + ], + "bbox": [ + 186, + 479, + 547, + 503 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "So what would make the third figure in the second set similar yet unique?", + "bbox": [ + 184, + 510, + 462, + 520 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Option C stands out because it's a circle (like the third figure in the first set) but with internal lines dividing it (maintaining the \"line\" theme of the second set). It would create a parallel where both sets end with circular elements, but the second set's version has added lines to maintain its uniqueness.", + "bbox": [ + 184, + 525, + 810, + 542 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Let me double-check by considering another angle. Maybe there's a transformation rule from each figure in the first set to the corresponding figure in the second set:", + "bbox": [ + 184, + 547, + 797, + 559 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. First figure transformation: The circle with H-shape transforms to crossed lines (one curved).", + "2. Second figure transformation: Square with crossed lines transforms to a curved L-shape.", + "3. Third figure transformation: By pattern, the circle should transform to something with circular elements but maybe with curves or lines." + ], + "bbox": [ + 184, + 565, + 700, + 589 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Looking at options again, C and D both have circular elements. 
C has straight lines dividing the circle while D has a swirl pattern.", + "bbox": [ + 184, + 595, + 668, + 604 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Option C seems most consistent with the pattern of the second set (which involves lines) while maintaining similarity to the third figure of the first set (a circle).", + "bbox": [ + 184, + 611, + 779, + 621 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "So I believe C is the most likely answer.", + "bbox": [ + 184, + 627, + 334, + 635 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 636, + 248, + 643 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "I'll solve this step by step by analyzing the two sets of figures and looking for patterns.", + "bbox": [ + 186, + 643, + 509, + 652 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "**First Set of Figures:**", + "bbox": [ + 184, + 657, + 271, + 666 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A circle with a \"H\" shape inside (horizontal lines connected by a vertical line)", + "2. A square with diagonal crossed lines", + "3. A simple circle" + ], + "bbox": [ + 186, + 666, + 480, + 690 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "**Second Set of Figures:**", + "bbox": [ + 186, + 698, + 282, + 705 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. An intersection of a straight line and a curved line", + "2. A curved line in an L-like shape", + "3. Question mark - needs to be determined" + ], + "bbox": [ + 186, + 705, + 382, + 729 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "**Looking for the Pattern:**", + "bbox": [ + 186, + 736, + 289, + 743 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "I need to understand how these sets relate to each other to find the right option for the question mark.", + "bbox": [ + 186, + 743, + 565, + 752 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "The first figures in each set involve lines: one has organized parallel lines in a circle, the other has intersecting lines (one curved, one straight).", + "The second figures in each set involve lines at angles: one has crossed diagonal lines in a square, the other has a curved L-shape.", + "For the third figure, the first set has a simple circle. Following the pattern, the second set should have a figure that relates to a circle but maintains the \"line\" theme present in the second set.", + "**Analyzing the Options:**", + "A: Two crossed lines with a curved end", + "B: Two curved lines crossing in an X pattern", + "C: A circle divided into four equal parts by perpendicular lines", + "D: A swirl pattern with curved lines", + "**Decision:**", + "Option C makes the most sense because:", + "1. It contains a circle (relating to the third figure in the first set)", + "2. It includes straight lines (maintaining the line theme of the second set)", + "3. 
The division of the circle into four parts creates a unique element for the second set while still maintaining similarity to the first set", + "Answer: C" + ], + "bbox": [ + 184, + 758, + 810, + 898 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Deductive (Easy)", + "text_level": 1, + "bbox": [ + 426, + 112, + 560, + 128 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/583d34c288f215b97b70b7361ce23897f12f100ad60e17ac83d9a8aebba2de98.jpg", + "image_caption": [ + "Figure 26: A sample error case of Deductive Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 38 + }, + { + "type": "table", + "img_path": "images/14ab2b1c55d80d22512d08289291b338fbb40e27ab710ad313761a8d7b7d6cb2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
GOBDE
NPKAJ
OZGFQ
WVIYN
IQTH
", + "bbox": [ + 184, + 133, + 320, + 239 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Question: Bruce ciphers words adopting the board demonstrated. For example, the word DVEJ is ciphered as GT WI GH NH. What word did Bruce cipher NQ OH OT WI?", + "bbox": [ + 333, + 152, + 812, + 190 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Options: A: XOCR. B: DJWS. C: KQFV. D: KFQV", + "bbox": [ + 334, + 191, + 625, + 204 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 334, + 210, + 447, + 224 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: I see a 5x5 grid of letters that appears to be the board used for Bruce's cipher. Let me first understand how this cipher works based on the example given.", + "bbox": [ + 184, + 247, + 812, + 266 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "The example states that the word \"DVEJ\" is ciphered as \"GT WI GH NH\".", + "bbox": [ + 184, + 270, + 459, + 281 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Let me analyze the pattern:", + "bbox": [ + 184, + 286, + 289, + 295 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "D is ciphered as GT", + "V is ciphered as W1 E is ciphered as GH", + "- 1 is ciphered as GH\n- j is ciphered as NH" + ], + "bbox": [ + 186, + 295, + 267, + 327 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "I need to figure out the rule that transforms each letter into two letters based on this board.", + "bbox": [ + 184, + 333, + 524, + 343 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Looking at the board, I can see that the first column contains G, N, O, W, I.", + "The first row contains G,O,B,D,E", + "The left column and top row are highlighted in green.", + "The bottom row is highlighted in yellow.", + "Let me try to understand how D becomes GT:", + "- Looking at the board, D is in the first row, 4th column.", + "-G is the letter in the first row, first column.", + "- T is not directly visible in the board as shown." + ], + "bbox": [ + 184, + 348, + 462, + 421 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Let me try a different approach. Maybe the cipher works by identifying the row and column of each letter.", + "bbox": [ + 184, + 426, + 581, + 436 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "For the letter D:", + "bbox": [ + 184, + 441, + 246, + 450 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- D is in the first row, 4th column.", + "- The letter at the beginning of row 1 is G.", + "- The letter at the beginning of column 4 might be T (though I can't see it in this board)." + ], + "bbox": [ + 186, + 450, + 509, + 474 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "For the letter V:", + "bbox": [ + 184, + 481, + 245, + 489 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- V appears to be in the 4th row, 2nd column.", + "- The letter at the beginning of row 4 is W." 
+ ], + "bbox": [ + 186, + 489, + 354, + 506 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "- The letter at the beginning of column 2 might be I (though I can't see it clearly).", + "bbox": [ + 184, + 506, + 485, + 513 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Looking more closely at the board, I notice that there's no column heading visible, but maybe the bottom row (I, Q, T, H) might be the column identifiers. This seems plausible.", + "bbox": [ + 184, + 518, + 808, + 537 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Let me check if this makes sense:", + "bbox": [ + 184, + 542, + 310, + 551 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- D is in row 1, column $4 \\rightarrow G$ (row 1 identifier) + T (column 4 identifier) = GT", + "- V is in row 4, column $2 \\rightarrow W$ (row 4 identifier) + Q (column 2 identifier)" + ], + "bbox": [ + 186, + 551, + 475, + 569 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "But this doesn't match the example where V is ciphered as WI, not WQ.", + "bbox": [ + 184, + 574, + 452, + 584 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Maybe the column identifiers are different or the rule is different. Let me re-examine...", + "bbox": [ + 184, + 589, + 506, + 599 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Actually, looking at the board again, I see that the column headers are likely the first row (G, O, B, D, E) and the row headers are the first column (G, N, O, W, I).", + "bbox": [ + 184, + 604, + 779, + 616 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "So:", + "bbox": [ + 184, + 621, + 200, + 628 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- D is at position (1, 4), so its coordinates would be GD (row G, column D).", + "- V isn't clearly visible in the grid shown." + ], + "bbox": [ + 186, + 628, + 462, + 646 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Let me try yet another approach. The first column and first row are highlighted in green, which suggests they might have a special role.", + "bbox": [ + 184, + 651, + 689, + 662 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Maybe the rule is:", + "bbox": [ + 184, + 667, + 254, + 676 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For each letter in the original word, find its position in the board.", + "- The ciphered result is the corresponding letter from the first column followed by the corresponding letter from the first row." + ], + "bbox": [ + 186, + 676, + 651, + 693 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Let me check this for D:", + "bbox": [ + 184, + 698, + 274, + 707 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "- If D is at position (r, c) in the board, the ciphered result would be the letter in the first column at row r, followed by the letter in the first row at column c.", + "bbox": [ + 184, + 707, + 756, + 715 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "But I still don't see how D becomes GT based on the board provided.", + "bbox": [ + 184, + 720, + 442, + 732 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Wait, maybe I need to interpret the board differently. 
Looking at the board, I notice:", + "bbox": [ + 184, + 737, + 496, + 746 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- G is in the first row, first column.", + "- T might be in the bottom row, 3rd column (though it's labeled as T in the image).", + "- W is in the 4th row, first column.", + "- I is in the 3rd row, 3rd column (or in the bottom row, 1st column).", + "- G is in the first row, first column.", + "-H is in the bottom row, 4th column.", + "- N is in the 2nd row, first column.", + "- H is in the bottom row, 4th column." + ], + "bbox": [ + 186, + 746, + 434, + 809 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "I'm still not seeing a clear pattern.", + "bbox": [ + 184, + 815, + 313, + 824 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 824, + 250, + 833 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Error Category: Reject to Answer", + "bbox": [ + 184, + 837, + 382, + 849 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Error Reason: The model reject to answer since it fails to settle on a single consistent ciphering rule or final answer. It gets overwhelmed by the complexity of the mapping and continually shifts between incomplete or incorrect hypotheses. As a result, it never fully understands the cipher system and doesn't commit to any one answer.", + "bbox": [ + 184, + 849, + 812, + 898 + ], + "page_idx": 38 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Deductive (Easy)", + "text_level": 1, + "bbox": [ + 426, + 111, + 560, + 128 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/16fa6aae6f434bae2f7c980d27911263d2f0d6dcdde43098f76431b41a04006d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/13c356b90f683f73e24d1a4dadc822c871d32f6cf7b204e3c8cfa4b0e7433dad.jpg", + "image_caption": [ + "Figure 27: A sample correct case of Deductive Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 212, + 138, + 509, + 287 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Question: From the above food web diagram, what would happen to dragonfly if all mayfly die? Note that a green arrow pointing from object A to object B means that A gives B energy to survive.", + "bbox": [ + 547, + 156, + 800, + 229 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Options: A: remains the same. B: increase. C: decrease. 
D: NA", + "bbox": [ + 547, + 229, + 795, + 253 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 549, + 263, + 661, + 277 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze the food web diagram to understand the relationships between different organisms.", + "bbox": [ + 183, + 297, + 808, + 325 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "In this food web:", + "bbox": [ + 184, + 335, + 287, + 347 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Green arrows point from an organism that gives energy to another organism (food source $\\rightarrow$ consumer)", + "- I need to examine what would happen to dragonfly nymphs if all mayfly nymphs die" + ], + "bbox": [ + 184, + 347, + 797, + 373 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Looking at the diagram:", + "bbox": [ + 184, + 383, + 330, + 397 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Mayfly nymphs feed on pondweed (arrow from pondweed to mayfly)", + "- Dragonfly nymphs feed on mayfly nymphs (arrow from mayfly to dragonfly)", + "- Dragonfly nymphs also feed on freshwater shrimp (arrow from freshwater shrimp to dragonfly)", + "- Brown trout feeds on dragonfly nymphs (arrow from dragonfly to brown trout)" + ], + "bbox": [ + 184, + 397, + 754, + 446 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "If all mayfly nymphs die:", + "bbox": [ + 184, + 457, + 334, + 470 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Dragonfly nymphs would lose one of their food sources (mayfly nymphs)", + "- Dragonfly nymphs would still have freshwater shrimp as a food source", + "- But losing mayfly nymphs would reduce the total available food for dragonfly nymphs" + ], + "bbox": [ + 184, + 470, + 702, + 508 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Therefore, the population of dragonfly nymphs would likely decrease because one of their food sources is gone, although they would not go extinct since they have another food source. 
This makes the correct answer (C) decrease.", + "bbox": [ + 183, + 518, + 810, + 555 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "", + "bbox": [ + 184, + 556, + 281, + 568 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Analysis of Food Web and Effect on Dragonfly Nymphs", + "bbox": [ + 184, + 568, + 524, + 580 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Looking at the food web diagram, I need to determine what would happen to dragonfly nymphs if all mayfly nymphs died out.", + "bbox": [ + 183, + 590, + 810, + 618 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Examining the Relationships", + "bbox": [ + 184, + 628, + 375, + 641 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "I can see that green arrows point from one organism to another, indicating energy transfer (food source $\\rightarrow$ consumer).", + "bbox": [ + 183, + 641, + 807, + 666 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "From the diagram, I can observe:", + "bbox": [ + 184, + 676, + 380, + 689 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Mayfly nymphs feed on pondweed", + "- Dragonfly nymphs feed on mayfly nymphs", + "- Dragonfly nymphs also feed on freshwater shrimp", + "- Brown trout feeds on dragonfly nymphs" + ], + "bbox": [ + 184, + 690, + 490, + 739 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "## Impact Analysis", + "bbox": [ + 184, + 751, + 302, + 763 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "If all mayfly nymphs died:", + "bbox": [ + 184, + 763, + 344, + 776 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Dragonfly nymphs would lose one of their primary food sources", + "2. Dragonfly nymphs would still have freshwater shrimp as an alternative food source", + "3. However, the loss of mayfly nymphs would reduce the total available food for dragonfly nymphs" + ], + "bbox": [ + 184, + 776, + 769, + 813 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Conclusion", + "bbox": [ + 184, + 824, + 274, + 835 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Since dragonfly nymphs would lose a significant food source (mayfly nymphs), their population would likely decline, even though they have another food source. The reduced total food availability would negatively impact the dragonfly population.", + "bbox": [ + 183, + 835, + 810, + 875 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Answer: C", + "bbox": [ + 184, + 885, + 251, + 897 + ], + "page_idx": 39 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Deductive (Medium)", + "text_level": 1, + "bbox": [ + 411, + 111, + 576, + 128 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/01e60a93178e8267e318f99e39c4cca1b82b8f7d2705c3cfe0ae49ccbac99c55.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/4798f6c391395f4a6803778aab8ae4f3f1792d475d51602ec2691394a3f0b0da.jpg", + "image_caption": [ + "Figure 28: A sample error case of Deductive Reasoning (difficulty: Medium)." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 148, + 364, + 209 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Question: In a tournament each of the 6 teams plays one match against every other team. In each round of matches, 3 take place simultaneously. A TV station has already decided which match it will broadcast for each round, as shown in the diagram. In which round will team D play against team F?", + "bbox": [ + 369, + 140, + 812, + 200 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Options: A: 1. B: 2. C: 3. D: 4", + "bbox": [ + 370, + 202, + 544, + 215 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Ground Truth: (A)", + "text_level": 1, + "bbox": [ + 370, + 222, + 483, + 236 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: This question is asking about a tournament where each team plays one match against every other team. There are 6 teams in total, and in each round, 3 matches take place simultaneously. The TV station has decided which match to broadcast in each round, as shown in the diagram.", + "bbox": [ + 184, + 244, + 812, + 263 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Let's analyze what the diagram tells us:", + "bbox": [ + 184, + 268, + 334, + 279 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Round 1: The match being broadcast is A-B", + "- Round 2: The match being broadcast is C-D", + "- Round 3: The match being broadcast is A-E", + "- Round 4: The match being broadcast is E-F", + "- Round 5: The match being broadcast is A-C" + ], + "bbox": [ + 184, + 285, + 354, + 324 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "We need to determine in which round team D will play against team F.", + "bbox": [ + 184, + 330, + 450, + 340 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "First, let's think about how many rounds there will be in total. With 6 teams, each team needs to play against 5 other teams. So there are a total of $(6 \\times 5) / 2 = 15$ matches. In each round, 3 matches take place, so there will be $15 / 3 = 5$ rounds in total.", + "bbox": [ + 184, + 345, + 810, + 364 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "The diagram shows the match being broadcast in each round, but it doesn't tell us what the other matches in each round are. To figure this out, I need to work out the full schedule of matches.", + "bbox": [ + 184, + 369, + 810, + 388 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "In round 1, A plays against B (as shown in the diagram). So teams C, D, E, and F must form the other two matches in round 1. Let's say these are C-D and E-F. In round 2, C plays against D (as shown in the diagram). So teams A, B, E, and F must form the other two matches in round 2. Let's say these are A-E and B-F.", + "bbox": [ + 184, + 393, + 771, + 411 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "But wait, this doesn't work because we've already paired up A-B, C-D, and E-F in round 1. Let's try a different approach.", + "bbox": [ + 184, + 416, + 633, + 426 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Let's try to systematically figure out the schedule. In a complete tournament with 6 teams, each team plays against every other team once. So each team plays 5 matches. There are a total of $(6 \\times 5) / 2 = 15$ matches. 
With 3 matches per round, we need $15 / 3 = 5$ rounds, as I calculated earlier.", + "bbox": [ + 184, + 431, + 812, + 450 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "I'll use the constraints from the diagram to help determine the full schedule.", + "bbox": [ + 184, + 455, + 470, + 465 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 1: A-B is one of the matches. The other two matches must use teams C, D, E, and F. Let's try to determine what those are.", + "bbox": [ + 184, + 470, + 661, + 481 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 2: C-D is one of the matches. The other two matches must use teams A, B, E, and F.", + "bbox": [ + 184, + 487, + 521, + 496 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 3: A-E is one of the matches. The other two matches must use teams B, C, D, and F.", + "bbox": [ + 184, + 502, + 521, + 511 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 4: E-F is one of the matches. The other two matches must use teams A, B, C, and D.", + "bbox": [ + 184, + 517, + 521, + 527 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 5: A-C is one of the matches. The other two matches must use teams B, D, E, and F.", + "bbox": [ + 184, + 532, + 521, + 542 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "I'll try filling in a table of which teams play in each round.", + "bbox": [ + 184, + 549, + 403, + 559 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 1: A-B, C-? E-?", + "bbox": [ + 184, + 565, + 271, + 574 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 2: C-D, A-? , ?-", + "bbox": [ + 186, + 574, + 271, + 582 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 3: A-E, ?-? , ?-?", + "bbox": [ + 186, + 582, + 267, + 589 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 4: E-F, ?-? , ?-?", + "bbox": [ + 186, + 589, + 267, + 597 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Round 5: A-C, ?-? , ?-?", + "bbox": [ + 186, + 597, + 267, + 604 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Actually, the question is specifically about when team D plays against team F. So, I really just need to find out which round that happens in.", + "bbox": [ + 184, + 611, + 705, + 621 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "From the diagram, we know which matches are broadcast in each round:", + "bbox": [ + 184, + 627, + 457, + 635 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Round 1: A-B", + "- Round 2: C-D", + "- Round 3: A-F", + "Round 9: A-E Round 4: F-E", + "Round 1: A C" + ], + "bbox": [ + 186, + 636, + 243, + 671 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "So the matches involving D are:", + "bbox": [ + 184, + 681, + 305, + 689 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "- Round 2: D plays against C.", + "bbox": [ + 186, + 690, + 295, + 698 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "- Other rounds: D plays against A, B, E, and F (one in each round).", + "bbox": [ + 186, + 698, + 433, + 705 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "The matches involving F are:", + "bbox": [ + 186, + 713, + 295, + 720 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "- Round 4: F plays against E. 
\n- Other rounds: F plays against A, B, C, and D (one in each round).", + "bbox": [ + 186, + 720, + 433, + 738 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Now, in which round do D and F play against each other? It can't be round 2, because D plays against C in that round. It can't be round 4, because F plays against E in that round. So D and F play against each other in either round 1, 3, or 5.", + "bbox": [ + 184, + 743, + 808, + 761 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Let's check round 1. We know A plays against B. D and F could potentially play against each other in round 1.", + "bbox": [ + 184, + 767, + 594, + 776 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Let's check round 3. We know A plays against E. The other teams (B, C, D, and F) form the other two matches. If D plays against F in this round, then B must play against C.", + "bbox": [ + 184, + 776, + 808, + 791 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Let's check round 5. We know A plays against C. The other teams (B, D, E, and F) form the other two matches. If D plays against F in this round, then B must play against E.", + "bbox": [ + 184, + 791, + 808, + 806 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Hmm, I'm having trouble determining when D plays against F.", + "bbox": [ + 184, + 813, + 421, + 821 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 821, + 248, + 830 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Error Category: Reject to Answer", + "text_level": 1, + "bbox": [ + 184, + 835, + 382, + 848 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Error Reason: The model rejects to answer since it fails to complete the full tournament schedule or isolate the specific round where D plays F. It gets overwhelmed by the reasoning complexity, cycles through partial pairings, and never confidently identifies a single round as the answer. As a result, it ends with indecision and no final response.", + "bbox": [ + 184, + 848, + 810, + 897 + ], + "page_idx": 40 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Deductive (Medium)", + "text_level": 1, + "bbox": [ + 411, + 111, + 576, + 128 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/c026a961938ed94885c911fa25839173a82101bc93bfae62d8fee252741b6a94.jpg", + "image_caption": [ + "Figure 29: A sample correct case of Deductive Reasoning (difficulty: Medium)." + ], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 41 + }, + { + "type": "table", + "img_path": "images/6e550321c6f928d460bbfa8999034f2fb0a676f1d34b0fb01b756caa063ddc83.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
L W B V M Q
Z P U K G J
A D H L R Z
G S X E O Y
K I C A F T
  M F I C S
", + "bbox": [ + 178, + 135, + 316, + 241 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Question: Jeff represents words applying the board demonstrated. For example, the word DCXEH is represented as AM KF GF GI AF. What word did Jeff represent KS KF LC LF GM?", + "bbox": [ + 326, + 152, + 797, + 190 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Options: A: TSMBC. B: TCMBS. C: PEQGT. D: FHMTK", + "bbox": [ + 328, + 191, + 656, + 204 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Ground Truth: (B)", + "text_level": 1, + "bbox": [ + 328, + 214, + 441, + 228 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: I'm given a grid or board that represents a mapping from letters to two-letter codes. Let's examine how the mapping works by looking at the example.", + "bbox": [ + 184, + 243, + 812, + 262 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "The word DCXEH is represented as AM KF GF GI AF.", + "bbox": [ + 184, + 267, + 388, + 277 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Let me determine the pattern:", + "bbox": [ + 184, + 284, + 299, + 292 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- D is represented as AM", + "- C is represented as KF", + "- X is represented as GF", + "E is represented as GI", + "- H is represented as AF" + ], + "bbox": [ + 186, + 292, + 300, + 330 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Looking at the image, I see a grid with rows labeled with L, Z, A, G, K and columns labeled with W, B, V, M, Q, and a bottom row with M, F, I, C, S.", + "bbox": [ + 184, + 337, + 732, + 347 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "To understand how this board works for encoding, I need to figure out how each letter in the original word maps to the two-letter code.", + "bbox": [ + 184, + 353, + 691, + 363 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "For D, it maps to AM. D appears in the first column of the row labeled A. So A (row) + M (column bottom row) = AM for D.", + "bbox": [ + 184, + 369, + 645, + 378 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "For C, it maps to KF. C appears in the third column of the row labeled K. So K (row) + F (column bottom row) = KF for C.", + "bbox": [ + 184, + 378, + 640, + 386 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "For X, it maps to GF. X appears in the third column of the row labeled G. So G (row) + F (column bottom row) = GF for X.", + "bbox": [ + 186, + 386, + 638, + 393 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "For E, it maps to GI. E appears in the fourth column of the row labeled G. So G (row) + I (column bottom row) = GI for E.", + "bbox": [ + 186, + 393, + 638, + 401 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "For H, it maps to AF. H appears in the third column of the row labeled A. So A (row) + F (column bottom row) = AF for H.", + "bbox": [ + 186, + 401, + 640, + 409 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "So the pattern seems to be:", + "bbox": [ + 184, + 416, + 285, + 424 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "1. Locate the letter in the grid", + "bbox": [ + 186, + 424, + 297, + 431 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "2. The first letter of the code is the row label", + "bbox": [ + 186, + 431, + 351, + 438 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "3. 
The second letter of the code is the value in the bottom row under the column where the letter is found", + "bbox": [ + 186, + 439, + 576, + 446 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Now let's decode KS KF LC LF GM:", + "bbox": [ + 186, + 455, + 321, + 463 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "KS:", + "bbox": [ + 186, + 470, + 200, + 478 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Row K, column with bottom row value S. This refers to column 5, which contains T.", + "bbox": [ + 186, + 478, + 498, + 486 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "KF:", + "bbox": [ + 186, + 494, + 200, + 501 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Row K, column with bottom row value F. This refers to column 2, which contains C.", + "bbox": [ + 186, + 501, + 500, + 510 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "LC:", + "bbox": [ + 186, + 517, + 200, + 523 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Row L, column with bottom row value C. This refers to column 4, which contains M.", + "bbox": [ + 186, + 523, + 501, + 532 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "LF:", + "bbox": [ + 186, + 541, + 199, + 547 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Row L, column with bottom row value F. This refers to column 2, which contains B.", + "bbox": [ + 186, + 547, + 496, + 555 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "GM", + "bbox": [ + 186, + 563, + 200, + 569 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Row G, column with bottom row value M. This refers to column 1, which contains S.", + "bbox": [ + 186, + 571, + 501, + 580 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "So the decoded word is TCMBS.", + "bbox": [ + 186, + 587, + 307, + 594 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 595, + 248, + 603 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "# Decoding the Word Representation", + "bbox": [ + 186, + 603, + 326, + 611 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "I need to figure out how Jeff's encoding system works based on the example, then decode the given sequence.", + "bbox": [ + 184, + 618, + 594, + 627 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "## Understanding the Encoding System", + "bbox": [ + 186, + 633, + 336, + 643 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "In the example:", + "bbox": [ + 186, + 650, + 246, + 657 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- DCXEH is represented as AM KF GFI AF", + "bbox": [ + 186, + 657, + 354, + 665 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Looking at the grid provided in the image, I can see this is a coordinate system:", + "bbox": [ + 186, + 672, + 480, + 681 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- The rows are labeled with letters L, Z, A, G, K", + "bbox": [ + 186, + 681, + 362, + 689 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- The columns have a bottom row with values M, F, I, C, S", + "bbox": [ + 186, + 689, + 403, + 696 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "The encoding seems to work like this:", + "bbox": [ + 186, + 704, + 326, + 710 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "1. 
For each letter in the original word, find its position in the grid", + "bbox": [ + 186, + 710, + 429, + 720 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "2. The first part of the code is the row label", + "bbox": [ + 186, + 720, + 346, + 727 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "3. The second part is the value from the bottom row of that column", + "bbox": [ + 186, + 727, + 434, + 734 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Let me verify with the example:", + "bbox": [ + 186, + 743, + 305, + 751 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- D is in row A, column with M at bottom $\\rightarrow$ AM", + "bbox": [ + 186, + 751, + 369, + 758 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- C is in row K, column with F at bottom $\\rightarrow$ KF", + "bbox": [ + 186, + 758, + 359, + 766 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- X is in row G, column with F at bottom $\\rightarrow$ GP", + "bbox": [ + 186, + 766, + 359, + 773 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- E is in row G, column with I at bottom $\\rightarrow$ GI", + "bbox": [ + 186, + 773, + 359, + 781 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- H is in row $A_{t}$ column with F at bottom $\\rightarrow \\mathrm{AF}$ .", + "bbox": [ + 186, + 781, + 361, + 789 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "## Decoding the Given Sequence", + "bbox": [ + 186, + 797, + 312, + 806 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Now let's decode KS KF LC LF GM:", + "bbox": [ + 186, + 813, + 320, + 821 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- KS: Row K, column with S at bottom $\\rightarrow$ The letter is T", + "bbox": [ + 186, + 828, + 393, + 835 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- KF: Row K, column with F at bottom $\\rightarrow$ The letter is C", + "bbox": [ + 186, + 835, + 393, + 844 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- LC: Row L, column with C at bottom $\\rightarrow$ The letter is M", + "bbox": [ + 186, + 844, + 395, + 852 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- LF: Row $\\mathrm{L}_r$ , column with F at bottom → The letter is B", + "bbox": [ + 186, + 852, + 390, + 859 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "- GM: Row G, column with M at bottom $\\rightarrow$ The letter is S", + "bbox": [ + 186, + 859, + 398, + 867 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "This gives us TCMBS, which matches option (B).", + "bbox": [ + 186, + 875, + 367, + 883 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 890, + 227, + 898 + ], + "page_idx": 41 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Deductive (Hard)", + "text_level": 1, + "bbox": [ + 424, + 112, + 563, + 128 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/ec18a4ac4c2468c7500f4f846ec923d07b52c062bec7b6f9ea05e65fb8aa82cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 42 + }, + { + "type": "image", + "img_path": "images/7e313e170b470582b9ae9c2a7973d28c3e99cdd5aa3de4c0ceb96af05e72e7c3.jpg", + "image_caption": [ + "Figure 30: A sample error case of 
Deductive Reasoning (difficulty: Hard)." + ], + "image_footnote": [], + "bbox": [ + 212, + 133, + 436, + 258 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Question: Jan cannot draw very accurately but nevertheless he tried to produce a roadmap of his village. The relative position of the houses and the street crossings are all correct but three of the roads are actually straight and only Qurwik street is not. Who lives in Qurwik street?", + "bbox": [ + 467, + 140, + 792, + 214 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Options: A: Amy. B: Ben. C: Carol. D: David", + "bbox": [ + 468, + 214, + 732, + 227 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "text_level": 1, + "bbox": [ + 468, + 239, + 581, + 253 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: In this problem, I'm given a drawing of a village roadmap made by Jan. According to the problem:", + "bbox": [ + 184, + 267, + 710, + 277 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The relative positions of houses and street crossings are correct", + "2. Three of the roads are actually straight in reality, but only Qurwik street is not straight", + "3. The houses are labeled with the names: Amy, Ben, Carol, and David", + "4. I need to determine who lives on Qurwik street" + ], + "bbox": [ + 186, + 279, + 516, + 306 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Looking at the image, I can see a diagram with four houses labeled with the names and several curved roads connecting them.", + "bbox": [ + 184, + 313, + 656, + 323 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Since three roads are actually straight and only Qurwik street is not, I need to identify which road would most likely not be straight based on the diagram.", + "bbox": [ + 184, + 329, + 759, + 339 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "The roads in the diagram appear to be:", + "bbox": [ + 184, + 345, + 333, + 353 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A road connecting Amy's house to the network", + "2. A road connecting Carol's house to the network", + "3. A road connecting David's house to the network", + "4. A road connecting Ben's house to the network", + "5. Various segments connecting these houses together" + ], + "bbox": [ + 186, + 354, + 387, + 393 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Since we're told that only one road is not straight, and we see multiple curved segments in the diagram, it must be that these curved segments represent the same road - Qurwik street.", + "bbox": [ + 184, + 400, + 810, + 416 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Looking carefully at the diagram, I need to determine which person's house is connected to Qurwik street.", + "bbox": [ + 184, + 422, + 581, + 431 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "The diagram shows curved roads connecting different houses. Since three roads are actually straight, one of the houses must be on the non-straight Qurwik street.", + "bbox": [ + 184, + 438, + 789, + 448 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Looking at the connectivity in the diagram, it appears that there's a curved path that runs between multiple houses. 
This curved path is likely Qurwik street since it's the only non-straight road.", + "bbox": [ + 184, + 454, + 810, + 470 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Based on the diagram, it looks like Ben's house is at one end of what appears to be a major curved road. David's house seems to be connected to this same curved road. Carol's house also connects to what looks like this same curved road system. Amy's house seems to be connected to part of this curved network as well.", + "bbox": [ + 184, + 477, + 810, + 494 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "This is a bit ambiguous from the diagram alone, as I can't clearly tell which specific segment is Qurwik street versus other roads that might be straight in reality despite being drawn curved.", + "bbox": [ + 184, + 500, + 810, + 517 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Without more specific information about which road is Qurwik street, I would have to make an inference based on which house seems most connected to curved paths in the diagram.", + "bbox": [ + 184, + 523, + 810, + 541 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Looking at the houses and their positions relative to the roads:", + "bbox": [ + 184, + 547, + 419, + 555 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Amy's house appears to be connected to a curved road segment", + "- Ben's house appears to be at the end of a curved road segment", + "- Carol's house connects to a curved road segment", + "- David's house connects to multiple curved segments" + ], + "bbox": [ + 186, + 556, + 429, + 587 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Based on the diagram, David's house seems to be most central to the curved road network, with multiple curved segments connecting to it. This suggests David might be on Qurwik street.", + "bbox": [ + 184, + 594, + 810, + 611 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "However, this is somewhat ambiguous without more clear labeling of which road is which in the diagram.", + "bbox": [ + 184, + 617, + 581, + 625 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 626, + 246, + 633 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Solving the Village Roadmap Problem", + "bbox": [ + 186, + 633, + 336, + 642 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "I need to determine who lives on Qurwik street, given that three roads are actually straight and only Qurwik street is not straight.", + "bbox": [ + 184, + 648, + 666, + 657 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Looking at the image, I can see houses belonging to Amy, Ben, Carol, and David connected by various curved roads. 
Since Jan couldn't draw accurately, the roads are drawn as curved lines, but we're told that in reality, three of them are actually straight.", + "bbox": [ + 184, + 664, + 807, + 681 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Analyzing the Road Network", + "bbox": [ + 184, + 686, + 310, + 696 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "In the diagram, I can see:", + "bbox": [ + 184, + 704, + 281, + 710 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "- Four houses connected by a network of curved roads", + "bbox": [ + 186, + 712, + 388, + 719 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "- Multiple intersections between these roads", + "bbox": [ + 186, + 720, + 352, + 726 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Each house is connected to at least one road", + "bbox": [ + 186, + 727, + 354, + 734 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Identifying Qurwik Street", + "bbox": [ + 186, + 742, + 297, + 751 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "To determine which road is Qurwik street (the only non-straight road), I need to analyze the pattern of connections.", + "bbox": [ + 184, + 757, + 614, + 767 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Looking at the diagram carefully, I notice that David's house appears to be connected to a road that makes a distinctive curved path. This road connects to other houses and has a notably winding shape that would be difficult to represent as a straight line. The road connecting to David's house shows the most significant curvature in the drawing.", + "bbox": [ + 184, + 773, + 810, + 797 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Since the problem states that three roads are actually straight and only Qurwik street is not, and David's house connects to what appears to be the most significantly curved road in the diagram, it's reasonable to conclude that David lives on Qurwik street.", + "bbox": [ + 184, + 804, + 810, + 821 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Answer: D", + "bbox": [ + 184, + 827, + 228, + 835 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Error Category: Understanding Error", + "bbox": [ + 184, + 845, + 406, + 858 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Error Reason: The model misinterprets the condition that only one road (Qurwik street) is not straight, while the other three are. It misunderstands the roads that seem to be curved in the image are not all really curved.", + "bbox": [ + 184, + 859, + 810, + 893 + ], + "page_idx": 42 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Deductive (Hard)", + "text_level": 1, + "bbox": [ + 424, + 111, + 563, + 128 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/60adf99283738943138682dd44b10b03153727886a6da65dcb8737ff516f26e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 43 + }, + { + "type": "image", + "img_path": "images/61b015d2546abf578760e8cc3932dd392875f94640dc490a388206fc830df894.jpg", + "image_caption": [ + "Figure 31: A sample correct case of Deductive Reasoning (difficulty: Hard)." 
+ ], + "image_footnote": [], + "bbox": [ + 179, + 141, + 379, + 258 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Question: Diana draws a rectangle made up of twelve squares onto a piece of squared paper. Some of the squares are coloured in black. She writes the number of adjacent black squares into every white square. The diagram shows an example of such a rectangle. Now she does the same with a rectangle made up of 2018 squares. What is the biggest number that she can obtain as the sum of all numbers in the white squares?", + "bbox": [ + 385, + 143, + 802, + 228 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Options: A: 2800. B: 2950. C: 3025. D: 3100", + "bbox": [ + 388, + 229, + 640, + 241 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 388, + 250, + 501, + 263 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: In this problem, Diana has a rectangle made up of squares, some of which are colored black. Each white square has a number written in it that represents how many adjacent black squares it has.", + "bbox": [ + 184, + 273, + 810, + 292 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "In the example given:", + "bbox": [ + 184, + 297, + 269, + 306 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The first row: 1, [black], 2, 1", + "- The second row: 0, 3, [black], 1", + "- The third row: 1, [black], 2, 1" + ], + "bbox": [ + 186, + 306, + 305, + 330 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "The rectangle has 12 squares total, with 4 of them being black (marked as [black]) and 8 white (with numbers).", + "bbox": [ + 184, + 337, + 596, + 347 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Now, Diana does the same with a rectangle made up of 2018 squares. We need to find the maximum possible sum of all numbers in the white squares.", + "bbox": [ + 184, + 352, + 743, + 362 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Let's think about how to maximize this sum. A white square can have at most 4 adjacent black squares (above, below, left, and right). So each white square can contribute at most 4 to the sum.", + "bbox": [ + 184, + 368, + 808, + 385 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Let's consider the example given, which is a $3 \\times 4$ grid. There are 4 black squares and 8 white squares. The sum of numbers in white squares is $1 + 2 + 1 + 0 + 3 + 1 + 1 + 2 + 1 = 12$ .", + "bbox": [ + 184, + 391, + 808, + 407 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "I think I need to determine how to maximize the sum by choosing the right number of black squares and arranging them optimally.", + "bbox": [ + 184, + 414, + 674, + 425 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "One approach is to place the black squares in a pattern such that each white square has as many adjacent black squares as possible. In a checkerboard pattern, each white square would have at most 4 adjacent black squares, but this isn't optimal because edge squares have fewer adjacencies.", + "bbox": [ + 184, + 430, + 808, + 448 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Act, t a h n .", + "bbox": [ + 184, + 453, + 808, + 470 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "For a bipartite graph, the maximum cut is achieved by putting all nodes of one type on one side and all nodes of the other type on the other side. 
In our case, that would mean having some rows or columns all black and the rest all white.", + "bbox": [ + 184, + 477, + 808, + 494 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Let's say our grid is $\\mathbf{r} \\times \\mathbf{c}$ , and we color some rows all black and the rest all white. If we color k rows all black, then the number of adjacencies between black and white squares would be $\\mathrm{k} \\times \\mathrm{c} \\times 2$ (because each row has c squares, and each black square in a row can have at most 2 adjacencies with white squares, one above and one below).", + "bbox": [ + 184, + 500, + 808, + 525 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Let's consider a more specific pattern. Imagine our grid is $2 \\times 1009$ (so it has 2018 squares total). We color the bottom row all black and the top row all white. Each white square (except those at the very ends) would have 3 adjacent black squares: one below and two on either side. The white squares at the ends would have 2 adjacent black squares.", + "bbox": [ + 184, + 531, + 808, + 556 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "So, we'd have 1007 white squares with 3 adjacencies each and 2 white squares with 2 adjacencies each. The total sum would be $1007 \\times 3 + 2 \\times 2 = 3025$ .", + "bbox": [ + 184, + 561, + 743, + 571 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 571, + 248, + 580 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Solving the Maximum Sum Problem for Diana's Rectangle", + "bbox": [ + 186, + 579, + 411, + 588 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "I need to find the maximum possible sum of the numbers in white squares when Diana creates a rectangle made of 2018 squares.", + "bbox": [ + 184, + 594, + 663, + 604 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "## Understanding the Problem", + "bbox": [ + 184, + 609, + 305, + 617 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Diana draws a rectangle with some squares colored black.", + "- Each white square contains a number indicating how many adjacent black squares it has", + "- Adjacent means sharing an edge (up, down, left, right)", + "- Maximum possible number for any white square is 4 (if all 4 adjacent squares are black)", + "- We need to find the maximum possible sum across all white squares" + ], + "bbox": [ + 186, + 617, + 519, + 657 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Strategy", + "bbox": [ + 184, + 664, + 232, + 672 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "To maximize the sum, I'll need to determine:", + "1. How many squares should be black vs. white", + "2. The optimal arrangement of these squares" + ], + "bbox": [ + 186, + 672, + 364, + 696 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Analysis", + "bbox": [ + 184, + 703, + 233, + 710 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let's consider what makes the sum large:", + "- Each white square can contribute at most 4 to the sum", + "- Each black square can contribute to at most 4 adjacent white squares", + "- The total represents the number of adjacencies between black and white squares" + ], + "bbox": [ + 186, + 710, + 490, + 743 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "For a 2018-square rectangle, the optimal shape matters. 
Let's try a $43 \\times 47$ rectangle (43×47=2021, which is close enough to 2018 for our analysis).", + "bbox": [ + 184, + 750, + 718, + 758 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "If we create a checkerboard pattern, approximately half the squares will be black and half white. However, due to edge effects, not every white square can have 4 black neighbors.", + "bbox": [ + 184, + 765, + 808, + 782 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Calculating the Maximum", + "bbox": [ + 184, + 789, + 299, + 797 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "For a rectangle with dimensions $r \\times c$ , the maximum sum occurs when:", + "- Black squares are placed in a checkerboard pattern", + "- The rectangle is as close to square as possible (minimizing perimeter)" + ], + "bbox": [ + 186, + 797, + 447, + 821 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "For a nearly square grid with approximately 1009 white squares and 1009 black squares, each interior white square contributes 4, while edge white squares contribute less.", + "bbox": [ + 184, + 827, + 808, + 844 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "From a theoretical analysis, the maximum sum would be: $-55 \\times 55 = 3025$", + "bbox": [ + 184, + 851, + 401, + 867 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "This corresponds to option (C) 3025.", + "bbox": [ + 186, + 873, + 321, + 883 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Answer: C", + "bbox": [ + 186, + 888, + 228, + 897 + ], + "page_idx": 43 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Inductive (Easy)", + "text_level": 1, + "bbox": [ + 429, + 112, + 557, + 128 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/cbd2f7e3140808a83ca69a0a2e1610dd9a654841f60dae20b2433020e2adb7e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/f9ae1628f4f27c67e42a309bcf565c5199ee7d184e1863bf889224ec5abf6921.jpg", + "image_caption": [ + "Figure 32: A sample error case of Inductive Reasoning (difficulty: Easy)." 
+ ], + "image_footnote": [], + "bbox": [ + 184, + 140, + 228, + 172 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "A.", + "bbox": [ + 204, + 175, + 210, + 181 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/094995f7fd6661fa2cf01e8206d18ba8f31705d31dd9700b64a1e8c398d45ed3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 141, + 300, + 172 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "B", + "bbox": [ + 276, + 175, + 284, + 183 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/4d6f1428c39c2fafedb135682adbf4b8de0cd898e86e0609786958190341c596.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 141, + 372, + 172 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "C", + "bbox": [ + 346, + 175, + 354, + 183 + ], + "page_idx": 44 + }, + { + "type": "image", + "img_path": "images/5ebbe461b93a2cbe955c72fb377c52e5c4b6a02d084792aa8be5cc43571a02a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 141, + 449, + 172 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "D", + "bbox": [ + 419, + 175, + 429, + 183 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Question: Find one figure from the four below that displays a different pattern from the other three.", + "bbox": [ + 460, + 141, + 797, + 167 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ground Truth: (D)", + "text_level": 1, + "bbox": [ + 460, + 175, + 573, + 189 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze each figure to identify any patterns or differences among them:", + "bbox": [ + 184, + 196, + 702, + 208 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Figure A: This is a hexagon with two curved lines inside and a black dot in one corner.", + "Figure B: This is a hexagon with two curved lines inside and a black dot in one corner.", + "Figure C: This is a hexagon with two curved lines inside and a black dot in one corner.", + "Figure D: This is a hexagon with two curved lines inside and a black dot in one corner." + ], + "bbox": [ + 184, + 214, + 553, + 252 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "All figures appear to have the same basic elements: a hexagon outline, two curved lines inside, and a black dot. Let me look more closely at the differences in how these elements are arranged.", + "bbox": [ + 184, + 258, + 810, + 279 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Looking at the curved lines:", + "bbox": [ + 184, + 286, + 307, + 296 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Figure A: The two curved lines appear to form a single continuous curve that divides the hexagon.", + "- Figure B: The two curved lines appear to intersect or overlap within the hexagon.", + "- Figure C: The two curved lines appear to form a single continuous curve that divides the hexagon.", + "- Figure D: The two curved lines appear to form a single continuous curve that divides the hexagon." 
+ ], + "bbox": [ + 186, + 296, + 609, + 332 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Looking at the position of the dot:", + "bbox": [ + 184, + 339, + 333, + 349 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Figure A: The dot is in one corner of the hexagon.", + "- Figure B: The dot is in one corner of the hexagon.", + "- Figure C: The dot is in one corner of the hexagon.", + "- Figure D: The dot is in one corner of the hexagon." + ], + "bbox": [ + 186, + 349, + 403, + 385 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Let me look even more closely at the arrangement of the curves:", + "bbox": [ + 184, + 393, + 459, + 402 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Figure A: The curves divide the hexagon into sections, with one section containing the dot.", + "- Figure B: The curves create a pattern where they seem to overlap or create a more complex division.", + "- Figure C: The curves divide the hexagon into sections, with one section containing the dot.", + "- Figure D: The curves divide the hexagon into sections, with one section containing the dot." + ], + "bbox": [ + 186, + 402, + 616, + 438 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Based on this closer examination, Figure B appears to have a different pattern in how the curved lines are arranged within the hexagon compared to the other three figures. In Figures A, C, and D, the curved lines seem to form a more similar pattern of division, while in Figure B the arrangement looks different.", + "bbox": [ + 184, + 446, + 810, + 474 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Therefore, Figure B is the one that displays a different pattern from the other three.", + "", + "# Pattern Recognition Problem Analysis" + ], + "bbox": [ + 184, + 481, + 539, + 510 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "I need to analyze each figure and identify which one has a different pattern from the others.", + "bbox": [ + 184, + 517, + 576, + 527 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Looking at all four figures:", + "bbox": [ + 184, + 535, + 300, + 545 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- All are hexagons", + "- All contain curved lines inside", + "- All have a single black dot", + "- All have two curved lines creating sections within the hexagon" + ], + "bbox": [ + 186, + 545, + 459, + 580 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Let me examine the specific arrangement of these elements in each figure:", + "bbox": [ + 184, + 588, + 500, + 599 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "## Figure A:", + "bbox": [ + 186, + 607, + 241, + 614 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Two curved lines that create sections", + "- The black dot is positioned in the lower left corner", + "- The curves form a pattern that divides the hexagon into three sections" + ], + "bbox": [ + 186, + 616, + 488, + 643 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Figure B:", + "bbox": [ + 186, + 651, + 240, + 660 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Two curved lines that appear to overlap or intersect", + "- The black dot is positioned in the lower right corner", + "- The curves create a different division pattern compared to the 
other figures" + ], + "bbox": [ + 186, + 660, + 511, + 688 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Figure C:", + "bbox": [ + 186, + 695, + 241, + 704 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Two curved lines that create sections", + "- The black dot is positioned in the lower left corner", + "- The pattern of curve division is similar to Figure A" + ], + "bbox": [ + 186, + 705, + 408, + 732 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Figure D:", + "bbox": [ + 186, + 739, + 241, + 750 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Two curved lines that create sections", + "- The black dot is positioned in the lower right corner", + "- The pattern of curve division is similar to Figures A and C." + ], + "bbox": [ + 186, + 750, + 439, + 776 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "The key difference is in how the curved lines are arranged. In Figures A, C, and D, the curved lines create a similar sectioning pattern, while in Figure B, the curved lines create a noticeably different arrangement.", + "bbox": [ + 184, + 784, + 810, + 804 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 811, + 233, + 820 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "bbox": [ + 184, + 825, + 379, + 837 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Error Reason: The model makes a reasoning error by overemphasizing minor variations in the curve overlap while overlooking the more fundamental structural difference. Figure D is the only one where the black dot and curves are positioned differently, breaking the symmetry seen in A, B, and C. The model incorrectly identifies B as the outlier due to superficial curve complexity rather than recognizing the true deviation in spatial structure.", + "bbox": [ + 184, + 838, + 810, + 898 + ], + "page_idx": 44 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Inductive (Easy)", + "text_level": 1, + "bbox": [ + 429, + 112, + 557, + 128 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/afd0faec9feb947504d44ca25613ade5faa37fee1a4f3a557b8ced9f2809e7ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 45 + }, + { + "type": "image", + "img_path": "images/620f6d56098b109afe8ae4b63539ad8f4f4d3e720dff2c685ff3ad62e3d73bee.jpg", + "image_caption": [ + "Figure 33: A sample correct case of Inductive Reasoning (difficulty: Easy)." + ], + "image_footnote": [ + "A. 3.5.6.7", + "B.", + "C.", + "D." + ], + "bbox": [ + 282, + 136, + 413, + 209 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Question: Analyze the pattern of changes in the figure below, and choose the appropriate fifth row:", + "bbox": [ + 444, + 157, + 797, + 184 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ground Truth: (B)", + "bbox": [ + 446, + 191, + 558, + 205 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in this figure carefully. 
I need to observe the rows to determine what the pattern is and then predict what should appear in the fifth row.", + "bbox": [ + 184, + 250, + 810, + 267 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Row 1:", + "bbox": [ + 186, + 273, + 277, + 281 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Row 2:", + "bbox": [ + 186, + 282, + 277, + 289 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Row 3:", + "bbox": [ + 186, + 289, + 277, + 296 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Row 4:", + "bbox": [ + 186, + 296, + 277, + 304 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Looking at this pattern, I notice that:", + "- Each row has 7 symbols", + "- Each row contains a mix of hearts $(\\bigcirc)$ and flowers $(\\bullet)$", + "- There seems to be a shifting pattern" + ], + "bbox": [ + 186, + 311, + 392, + 345 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "I notice that if I track the position of the hearts $(\\bigcirc)$ :", + "bbox": [ + 186, + 351, + 375, + 359 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Row 1: positions 1, 3, 4", + "- Row 2: positions 2, 4, 5", + "- Row 3: positions 3, 5, 6", + "- Row 4: positions 4, 6, 7" + ], + "bbox": [ + 186, + 361, + 277, + 390 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "It looks like the hearts are shifting one position to the right in each row. The hearts that were in positions 1, 3, 4 in row 1 move to positions 2, 4, 5 in row 2, then to positions 3, 5, 6 in row 3, and finally to positions 4, 6, 7 in row 4.", + "bbox": [ + 184, + 398, + 808, + 415 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Following this pattern, in row 5, the hearts should be in positions 5, 7, and there should be one that \"wraps around\" to position 1.", + "bbox": [ + 184, + 421, + 665, + 431 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "So the fifth row should be: $\\bigcirc \\bigcirc \\bigcirc \\bigcirc \\bigcirc \\bigcirc$", + "bbox": [ + 184, + 436, + 349, + 445 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Now let me check the answer choices:", + "bbox": [ + 186, + 452, + 328, + 460 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A. $\\bullet \\bullet \\bullet \\bullet \\bullet \\bullet \\bullet$ - This doesn't match", + "B. ♥ - This matches my prediction", + "C. - This doesn't match", + "D. 
$\\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar -$ This doesn't match" + ], + "bbox": [ + 186, + 460, + 369, + 491 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "So the answer should be B.", + "bbox": [ + 186, + 500, + 287, + 507 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let me double-check by looking at the pattern of the flowers $(\\star)$ as well:", + "-Row 1:positions2,5,6,7", + "-Row 2:positions1,3,6,7", + "-Row 3:positions1,2,4,7", + "-Row 4:positions1,2,3,5" + ], + "bbox": [ + 186, + 513, + 457, + 554 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "The flowers are in all positions where hearts are not. So in row 5, they should be in positions 2, 3, 4, 6.", + "bbox": [ + 186, + 561, + 563, + 570 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "That gives: $\\text{心}$", + "bbox": [ + 186, + 570, + 292, + 577 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "This confirms that option B (♥ ♀ ♀ ♀ ♀ ♀) is correct.", + "bbox": [ + 186, + 577, + 395, + 585 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 585, + 246, + 593 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Analyzing the Pattern in the Figure", + "bbox": [ + 186, + 593, + 326, + 601 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "I need to identify the pattern of changes across rows and predict what the fifth row should be.", + "bbox": [ + 186, + 608, + 535, + 617 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "## Observation of the Pattern", + "bbox": [ + 186, + 625, + 299, + 631 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Looking at the existing four rows:", + "bbox": [ + 186, + 638, + 313, + 647 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "-Row1:", + "-Row2:", + "-Row 3:", + "-Row 4:" + ], + "bbox": [ + 186, + 648, + 281, + 676 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "## Pattern Analysis", + "bbox": [ + 186, + 686, + 261, + 694 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "I notice that the position of the hearts $(\\hat{\\mathbb{V}})$ shifts in a consistent way:", + "-Row 1: Hearts in positions 1,3,4", + "-Row 2: Hearts in positions 2,4,5", + "-Row 3: Hearts in positions 3,5,6", + "-Row 4: Hearts in positions 4,6,7" + ], + "bbox": [ + 186, + 702, + 436, + 741 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "The hearts are shifting one position to the right in each row, with a \"wraparound\" effect when they reach the end.", + "bbox": [ + 184, + 748, + 606, + 757 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Predicting the Fifth Row", + "bbox": [ + 186, + 763, + 292, + 772 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Following this pattern, in row 5, the hearts should be in positions:", + "-5 shifted from 4)", + "-7 (shifted from 6)" + ], + "bbox": [ + 186, + 779, + 431, + 801 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "-1 (shifted from 7, wrapped around)", + "bbox": [ + 186, + 804, + 323, + 811 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "So the fifth row should be:", + "bbox": [ + 186, + 818, + 349, + 825 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "## Checking the Options", + "bbox": [ + 186, + 834, + 281, + 842 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "text", + 
"list_items": [ + "A. $\\bullet \\bullet \\bullet \\bullet \\bullet \\bullet \\bullet -\\mathrm{Not}$ a match", + "B.♥ -This matches my prediction", + "C.♥ -Not a match", + "D. \\(\\text{串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串" + ], + "bbox": [ + 186, + 849, + 369, + 880 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 888, + 227, + 896 + ], + "page_idx": 45 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Inductive (Medium)", + "text_level": 1, + "bbox": [ + 415, + 112, + 573, + 128 + ], + "page_idx": 46 + }, + { + "type": "image", + "img_path": "images/88905e754aef245617dad71ab60cf29cbaa7de0c2cdc7bfa6246797edc486a4a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 46 + }, + { + "type": "image", + "img_path": "images/9b0cd4b7d45ea96e77e1a76fa635f68d59efca6ae415ddcea6116702d2a08391.jpg", + "image_caption": [ + "Figure 34: A sample error case of Inductive Reasoning (difficulty: Medium)." + ], + "image_footnote": [], + "bbox": [ + 204, + 140, + 450, + 183 + ], + "page_idx": 46 + }, + { + "type": "image", + "img_path": "images/f43ef894cc362d7c0b9717dfbc06a4a9667abb2d5aff08d0cb8cfc765e949aa3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 228, + 188, + 423, + 229 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "A", + "bbox": [ + 250, + 234, + 261, + 241 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "B", + "bbox": [ + 299, + 234, + 310, + 241 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "C", + "bbox": [ + 344, + 234, + 352, + 241 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "D", + "bbox": [ + 398, + 234, + 403, + 241 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern.", + "bbox": [ + 470, + 157, + 800, + 196 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 470, + 210, + 584, + 224 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the given sequence of figures and find what should come in place of the question mark.", + "bbox": [ + 184, + 250, + 812, + 270 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "The sequence shows hexagons with internal lines. 
Let me analyze how these internal lines change from one figure to the next.", + "bbox": [ + 184, + 276, + 720, + 287 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "First figure: A hexagon with a single line from the top vertex going down to about the middle of the hexagon.", + "bbox": [ + 184, + 294, + 651, + 306 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Second figure: A hexagon with a single line from the upper-right vertex going to the middle-left of the hexagon.", + "bbox": [ + 184, + 311, + 660, + 323 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Third figure: A hexagon with a single line from the bottom-right vertex going to the upper-left of the hexagon.", + "bbox": [ + 184, + 329, + 653, + 340 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Fourth figure: A hexagon with a single line from the bottom-left vertex going to the upper-right of the hexagon.", + "bbox": [ + 184, + 347, + 658, + 359 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "So the pattern seems to be that the line rotates in a clockwise direction around the hexagon. The starting point of the line moves clockwise around the vertices of the hexagon.", + "bbox": [ + 183, + 364, + 810, + 386 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Given this pattern, the fifth figure (the one with the question mark) should have a line from the lower vertex (at the bottom) going up to the upper-right of the hexagon.", + "bbox": [ + 183, + 392, + 810, + 412 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Now let's check the options:", + "bbox": [ + 184, + 419, + 308, + 429 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Option A: A hexagon with a line from the left vertex going to the right vertex.", + "bbox": [ + 184, + 429, + 516, + 438 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Option B: A hexagon with a line from the upper-left vertex going to the lower-right.", + "bbox": [ + 184, + 438, + 540, + 446 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Option C: A hexagon with a line from the top vertex going to the bottom-right.", + "bbox": [ + 184, + 446, + 519, + 455 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Option D: A hexagon with a line from the top vertex going to the bottom.", + "bbox": [ + 184, + 455, + 496, + 464 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Looking at the pattern and the next expected figure, option A seems closest to what I'm expecting - a line going from the left side to the right side. Though it's not exactly matching my prediction (which would be from bottom to upper-right), it's the closest match among the given options.", + "bbox": [ + 184, + 472, + 808, + 492 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "OK, I think I've been struggling because I don't have a completely clear view of the images. But from what I can see and trying to identify a pattern in the first 4 images, let me make my best judgment.", + "bbox": [ + 184, + 500, + 810, + 518 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "The line inside the hexagon appears to be moving in some pattern as we go from one figure to the next. 
Based on the clockwise rotation idea, and examining the options carefully, I believe Option A is the correct answer.", + "bbox": [ + 184, + 525, + 810, + 545 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "", + "bbox": [ + 184, + 545, + 256, + 553 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Analysis of Pattern in Hexagon Sequence", + "bbox": [ + 184, + 553, + 372, + 561 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "I need to identify the pattern in the sequence of figures and determine which option should replace the question mark.", + "bbox": [ + 184, + 570, + 689, + 580 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Examining the Pattern", + "bbox": [ + 184, + 588, + 297, + 598 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Let me analyze the first four figures in the sequence:", + "bbox": [ + 184, + 604, + 410, + 616 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. First figure: A hexagon with a line from the top vertex to the bottom-left side", + "2. Second figure: A hexagon with a line from the top-right vertex to the bottom side", + "3. Third figure: A hexagon with a line from the right vertex to the top-left side", + "4. Fourth figure: A hexagon with a line from the bottom-right vertex to the left side" + ], + "bbox": [ + 184, + 623, + 540, + 660 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Looking at this sequence, I can see that the starting point of the line moves clockwise around the hexagon (from top $\\rightarrow$ top-right $\\rightarrow$ right $\\rightarrow$ bottom-right). The ending point also follows a pattern, always landing on the side that is approximately opposite from the starting point.", + "bbox": [ + 184, + 667, + 810, + 688 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Predicting the Next Figure", + "bbox": [ + 184, + 694, + 315, + 705 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Following this pattern, the fifth figure should have a line starting from the bottom-left vertex (continuing the clockwise movement of the starting point). The line should end at the top or top-right side.", + "bbox": [ + 184, + 712, + 810, + 733 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Evaluating the Options", + "bbox": [ + 184, + 739, + 302, + 750 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Looking at the provided options:", + "bbox": [ + 184, + 757, + 328, + 766 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Option A: A hexagon with a line from the left vertex to the right side", + "- Option B: A hexagon with a line from the upper-left vertex to the lower-right side", + "- Option C: A hexagon with a line from the upper-left vertex to the right side", + "- Option D: A hexagon with a line from the top vertex to the right side" + ], + "bbox": [ + 184, + 766, + 537, + 801 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Option A is the only one that features a line starting from the left vertex, which is the closest to the bottom-left vertex that should be next in our pattern. 
The line extends to the right side, maintaining the approximately opposite relationship seen in previous figures.", + "bbox": [ + 184, + 809, + 810, + 830 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Answer: A", + "bbox": [ + 184, + 837, + 233, + 845 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "bbox": [ + 184, + 849, + 379, + 862 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Error Reason: The model correctly observes a clockwise movement but incorrectly predicts the direction and placement of the next line, choosing option A. The model fails to identify the angle of the movement pattern.", + "bbox": [ + 184, + 862, + 810, + 900 + ], + "page_idx": 46 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Inductive (Medium)", + "text_level": 1, + "bbox": [ + 415, + 112, + 573, + 128 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/739f5d92cd82927a2d7bdf4d2ab2309c4bc04f1b4fc7ed3709e892e83ee42b4e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/dabc692ef1c0c1082feaaf1e2462b37ee282600a2506e8c89411d6c3df9a1439.jpg", + "image_caption": [ + "①" + ], + "image_footnote": [], + "bbox": [ + 248, + 130, + 295, + 166 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/833d68dbfe0935e410c0edebe6b9a388a975d4f0df03af384d4597f191c08d49.jpg", + "image_caption": [ + "②" + ], + "image_footnote": [], + "bbox": [ + 300, + 132, + 344, + 164 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/61d4e7983c5dbd66a1aaf55dd7c03249b7db7e007c72d194c2194e20780a9fa2.jpg", + "image_caption": [ + "$\\therefore m = \\frac{3}{11}$ ;" + ], + "image_footnote": [], + "bbox": [ + 351, + 132, + 395, + 165 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Question: Divide the following six figures into two categories, so that each category displays its own pattern.", + "bbox": [ + 408, + 159, + 799, + 186 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Ground Truth: (D)", + "bbox": [ + 410, + 196, + 524, + 210 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/ad7496557f0542675336a310ca2059dadf3a471b7b68213fd05a31105358d979.jpg", + "image_caption": [ + "$④$", + "Figure 35: A sample correct case of Inductive Reasoning (difficulty: Medium)." + ], + "image_footnote": [], + "bbox": [ + 248, + 176, + 295, + 210 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/34f92a4aa521689cf2d90b4f881ab0bffe6490c57e02a38f4cb90bd5f6129190.jpg", + "image_caption": [ + "(5)" + ], + "image_footnote": [], + "bbox": [ + 300, + 178, + 344, + 210 + ], + "page_idx": 47 + }, + { + "type": "image", + "img_path": "images/d96d394318067b904a712079a7165288a420ae879a0d88f7529f2c1c573e3d0f.jpg", + "image_caption": [ + "$\\therefore m = \\frac{3}{11}$ ;" + ], + "image_footnote": [], + "bbox": [ + 351, + 178, + 395, + 210 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "B. ①④⑥, ②③⑤", + "bbox": [ + 318, + 226, + 388, + 234 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "D. ①②④, ③⑤⑥", + "bbox": [ + 318, + 238, + 388, + 244 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A. 
①③⑤, ②④⑥", + "C. ①③④, ②⑤⑥" + ], + "bbox": [ + 184, + 224, + 256, + 244 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: I need to analyze the six figures and find a pattern that divides them into two distinct categories. Let me examine each figure:", + "bbox": [ + 184, + 253, + 810, + 263 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Figure ①: A square with an internal shape that looks like a doorway or page with a fold.", + "Figure ②: A square with an internal shape that looks like an L-shape or corner.", + "Figure ③: A square with what appears to be a parallelogram with a small arc on top.", + "Figure ④: A square with some zigzag or lightning-bolt-like shape inside.", + "Figure ⑤: A square with a circle that has an X or cross inside it.", + "Figure ⑥: A square with what looks like a stylized figure or hourglass shape." + ], + "bbox": [ + 186, + 268, + 514, + 319 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Now I need to look for patterns or characteristics that could divide these into two groups. Some possible categorization criteria:", + "bbox": [ + 184, + 324, + 660, + 333 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Curved vs. straight lines", + "- Symmetrical vs. asymmetrical", + "- Simple vs. complex", + "- Open vs. closed shapes", + "- Single shape vs. multiple shapes" + ], + "bbox": [ + 186, + 333, + 313, + 372 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Let me try to apply these criteria:", + "bbox": [ + 184, + 378, + 310, + 388 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Curved vs. straight lines:", + "bbox": [ + 184, + 393, + 281, + 402 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "- Figures with prominent curves: ⑤ (circle with X)", + "bbox": [ + 186, + 402, + 375, + 410 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "- Figures with mainly straight lines: ①, ②, ③, ④, ⑥", + "bbox": [ + 186, + 410, + 380, + 419 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "That doesn't divide evenly into two groups.", + "bbox": [ + 186, + 425, + 349, + 435 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Symmetrical vs. asymmetrical:", + "- More symmetrical: ⑤ (circle with X), possibly ⑥", + "- Less symmetrical/asymmetrical: ①, ②, ③, ④" + ], + "bbox": [ + 184, + 441, + 372, + 465 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Simple vs. complex:", + "bbox": [ + 184, + 472, + 263, + 481 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Simpler shapes: ②, ⑤", + "- More complex shapes: ①, ③, ④, ⑥" + ], + "bbox": [ + 186, + 481, + 323, + 496 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Open vs. 
closed shapes inside the square:", + "bbox": [ + 184, + 503, + 341, + 511 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Closed shapes: ⑤ (circle)", + "- Open shapes: ①, ②, ③, ④, ⑥" + ], + "bbox": [ + 186, + 511, + 302, + 527 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Let me look at whether the internal shapes touch the outer square:", + "bbox": [ + 184, + 534, + 434, + 542 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Touching the outer square: ①, ②, ④, possibly ③ and ⑥", + "- Not touching the outer square: ⑤" + ], + "bbox": [ + 186, + 542, + 398, + 559 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Another possibility - shapes with internal divisions vs. shapes without:", + "- With internal divisions: ①, ④, ⑤", + "- Without internal divisions: ②, ③, ⑥" + ], + "bbox": [ + 184, + 565, + 450, + 589 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Let me check the answer choices and see if any of these patterns match:", + "bbox": [ + 184, + 595, + 452, + 604 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A. ①③⑤, ②④⑥", + "B. ①④⑥, ②③⑤", + "C. ①③④, ②⑤⑥", + "D. ①②④, ③⑤⑥" + ], + "bbox": [ + 186, + 611, + 253, + 643 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Looking at option A: What could unite ①③⑤? They don't seem to share an obvious common pattern. What could unite ②④⑥? Again, not seeing an obvious pattern.", + "bbox": [ + 184, + 650, + 803, + 660 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Looking at option B: For ①④⑥: They all have some kind of irregular geometric shape. For ②③⑤: They all seem to have more regular geometric elements.", + "bbox": [ + 186, + 660, + 759, + 667 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Looking at option C: For ①③④: Not seeing a clear pattern. For ②⑤⑥: Not seeing a clear pattern.", + "bbox": [ + 186, + 667, + 549, + 675 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Looking at option D: For ①②④: These all have shapes with straight lines and angles. For ③⑤⑥: These all have some curved elements - ③ has an arc on top, ⑤ is a", + "bbox": [ + 186, + 675, + 810, + 683 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "circle, ⑥ has curved sides", + "bbox": [ + 186, + 683, + 284, + 691 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "I think option D might be the answer. The distinction seems to be:", + "bbox": [ + 186, + 696, + 431, + 705 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "- Group ①②④: Figures with only straight lines and angles", + "bbox": [ + 186, + 705, + 405, + 714 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "- Group ③⑤⑥: Figures that incorporate some curved elements", + "bbox": [ + 186, + 714, + 421, + 722 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 722, + 246, + 729 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Analyzing Six Geometric Figures", + "bbox": [ + 186, + 729, + 318, + 738 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "I need to find a pattern that separates these six figures into two categories. 
Let me examine each figure carefully:", + "bbox": [ + 186, + 744, + 602, + 753 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Figure ①: Square with an internal shape that has straight edges (looks like a doorway or page)", + "bbox": [ + 184, + 758, + 537, + 768 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Figure ②: Square with an L-shaped internal figure made of straight lines", + "Figure ③: Square with a trapezoid/parallelogram shape that has a curved arc on top", + "Figure ④: Square with zigzag/lightning-shaped internal figure made of straight lines", + "Figure ⑤: Square with a circle (curved) containing an X", + "Figure ⑥: Square with what appears to be a stylized figure with curved edges" + ], + "bbox": [ + 184, + 768, + 504, + 808 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Looking at potential pattern differences:", + "bbox": [ + 184, + 814, + 336, + 823 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Option D: ①②④, ③⑤⑥", + "bbox": [ + 186, + 829, + 292, + 837 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Group ①②④: All figures contain ONLY straight lines and angular shapes", + "- Group $③⑤⑥$ All figures incorporate curved elements (arc in $③$ circle in $⑤$ curved sides in $⑥$", + "This is a clear and consistent pattern. The first group has exclusively straight-line geometry, while the second group all incorporate curved elements in their design." + ], + "bbox": [ + 186, + 837, + 792, + 862 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "The other options don't reveal as clear a pattern distinction.", + "bbox": [ + 184, + 868, + 408, + 878 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Answer: D", + "bbox": [ + 186, + 883, + 228, + 892 + ], + "page_idx": 47 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 47 + }, + { + "type": "text", + "text": "Inductive (Hard)", + "text_level": 1, + "bbox": [ + 428, + 112, + 560, + 128 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/c6fc1a83d6a9bc93b8784359ac076f5ca84d132570716648ebe7672867c6bd45.jpg", + "image_caption": [ + "Figure 36: A sample error case of Inductive Reasoning (difficulty: Hard)." 
+ ], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/1994f7acc55cf3c5043ee5cd9ca074533673ed5163c5a6430e8a63567a075735.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 200, + 133, + 416, + 166 + ], + "page_idx": 48 + }, + { + "type": "image", + "img_path": "images/b3a81caaab7e6d1a6308effe863bcf676eca4ca870e4b241ebb9c196a80297ae.jpg", + "image_caption": [ + "A B C D" + ], + "image_footnote": [], + "bbox": [ + 235, + 170, + 380, + 202 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern.", + "bbox": [ + 426, + 138, + 792, + 176 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Ground Truth: (B)", + "bbox": [ + 426, + 188, + 540, + 203 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze this pattern recognition problem step by step.", + "bbox": [ + 184, + 215, + 573, + 227 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "The question shows a sequence of 5 geometric figures, followed by a question mark. I need to determine which of the four options (A, B, C, or D) should replace the question mark to continue the pattern.", + "bbox": [ + 184, + 231, + 812, + 250 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "The sequence is:", + "text_level": 1, + "bbox": [ + 184, + 255, + 250, + 263 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Two overlapping circles", + "2. A triangle inside a circle", + "3. A shape that looks like an oval with horizontal lines", + "4. A square with a triangle inside or a diagonal line", + "5. A pentagon (5-sided polygon)" + ], + "bbox": [ + 184, + 265, + 388, + 303 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "? (The figure we need to determine)", + "bbox": [ + 184, + 303, + 321, + 310 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Options:", + "text_level": 1, + "bbox": [ + 184, + 316, + 220, + 325 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A. A hexagon (6-sided polygon)", + "B. A rectangle with a diagonal line", + "C. A square with a grid/cross inside" + ], + "bbox": [ + 184, + 325, + 321, + 349 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "D. 
A triangle", + "bbox": [ + 184, + 349, + 236, + 357 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Let me look for patterns:", + "text_level": 1, + "bbox": [ + 184, + 364, + 279, + 372 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- If we're counting sides: circles (infinite sides), triangle (3 sides), oval (curvy, so infinite?), square/rectangle (4 sides), pentagon (5 sides)", + "- If looking at the number of sides, it might be increasing: $\\infty, 3, \\infty, 4, 5\\ldots$ which would suggest 6 sides next (hexagon)", + "- Or it could be about complexity of the shapes or some transformation between them" + ], + "bbox": [ + 184, + 372, + 691, + 396 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Looking at option A (hexagon), this would follow if the pattern is increasing the number of sides: pentagon (5 sides) $\\rightarrow$ hexagon (6 sides).", + "bbox": [ + 184, + 402, + 696, + 412 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "This seems like the most logical pattern - the progression of regular polygons with increasing number of sides:", + "bbox": [ + 184, + 417, + 596, + 426 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Pentagon (5 sides)", + "- Hexagon (6 sides)", + "- Heptagon (7 sides)", + "- Octagon (8 sides)" + ], + "bbox": [ + 184, + 428, + 261, + 458 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "etc.", + "bbox": [ + 184, + 459, + 200, + 465 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "So, the answer appears to be A (hexagon).", + "bbox": [ + 184, + 473, + 343, + 482 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let me double-check if there's another pattern I'm missing:", + "- The shapes aren't consistently \"shape within a shape\" so that's not the pattern", + "- It's not about filled vs. unfilled shapes", + "- It doesn't appear to be about rotation or reflection" + ], + "bbox": [ + 184, + 488, + 480, + 520 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "The increasing number of sides seems to be the most consistent pattern:", + "bbox": [ + 184, + 527, + 452, + 536 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "-∞ sides (circles)", + "-3 sides (triangle)", + "- Curved shape (represents $\\infty$ again or could be a special case)", + "- 4 sides (square/rectangle)", + "- 5 sides (pentagon)", + "- ? (logically 6 sides, which would be a hexagon)" + ], + "bbox": [ + 186, + 537, + 416, + 583 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Therefore, option A (hexagon) appears to be the correct answer.", + "bbox": [ + 184, + 589, + 423, + 599 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 599, + 248, + 606 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "I need to identify the pattern in this sequence of figures and determine which option should replace the question mark.", + "bbox": [ + 184, + 606, + 630, + 614 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Let me analyze the sequence:", + "bbox": [ + 184, + 621, + 295, + 630 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Two overlapping circles", + "2. A triangle inside a circle", + "3. An oval with horizontal lines", + "4. 
A quadrilateral with a diagonal line", + "5.A pentagon (5-sided polygon)", + "6.2 (This is what I need to find)" + ], + "bbox": [ + 186, + 630, + 328, + 674 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Looking at these shapes, I notice a potential pattern in the number of sides of the primary shapes:", + "bbox": [ + 184, + 683, + 550, + 691 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- First shape: Circles (infinite sides)", + "- Second shape: Triangle (3 sides)", + "- Third shape: Oval (curved shape)", + "- Fourth shape: Quadrilateral (4 sides)", + "- Fifth shape: Pentagon (5 sides)" + ], + "bbox": [ + 186, + 691, + 344, + 731 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "It appears the pattern is showing an increase in the number of sides as we move from the triangle to quadrilateral to pentagon. Following this pattern, the next shape should have 6 sides.", + "bbox": [ + 184, + 737, + 812, + 753 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Looking at the options:", + "bbox": [ + 184, + 761, + 274, + 768 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Option A: Hexagon (6-sided polygon)", + "-Option B:Rectangle with diagonal line", + "- Option C: Square with grid", + "-Option D:Triangle" + ], + "bbox": [ + 186, + 768, + 336, + 800 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Since the logical progression is to a 6-sided figure after the 5-sided pentagon, option A (hexagon) is the correct answer to continue the pattern.", + "bbox": [ + 184, + 806, + 712, + 816 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Answer: A", + "bbox": [ + 184, + 823, + 230, + 830 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Error Category: Reasoning Error", + "text_level": 1, + "bbox": [ + 184, + 835, + 379, + 848 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Error Reason: The model overcomplicates the pattern and misinterprets the progression of shapes. It assumes the sequence is based on increasing the number of polygon sides and selects the hexagon (option A), expecting a 6-sided figure to follow the 5-sided pentagon. However, the actual pattern does not follow this sequence, as could be reasoned from the first three figures.", + "bbox": [ + 184, + 849, + 812, + 898 + ], + "page_idx": 48 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 48 + }, + { + "type": "text", + "text": "Inductive (Hard)", + "text_level": 1, + "bbox": [ + 428, + 112, + 558, + 128 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/6dcacdbc633bf2d93fe3048943845a396a23ab6786fee3bb15fbaa39e85fab8c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/17adf262a76fecf8616abb3242adf93be470dbd486216a32ecc0b909fe895b5c.jpg", + "image_caption": [ + "Figure 37: A sample correct case of Inductive Reasoning (difficulty: Hard)." 
+ ], + "image_footnote": [], + "bbox": [ + 228, + 132, + 321, + 202 + ], + "page_idx": 49 + }, + { + "type": "image", + "img_path": "images/e9bc9fcac4a2371d182b0f22eb739189041818bfaeb2e9d2b079bab6c84fa652.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 215, + 205, + 334, + 233 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern.", + "bbox": [ + 356, + 150, + 799, + 176 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 357, + 186, + 470, + 200 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: This question asks me to identify a pattern in a 3x3 grid of figures and then determine which of the four options (A, B, C, D) should replace the question mark to maintain that pattern.", + "bbox": [ + 184, + 241, + 810, + 258 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Let me analyze each row of the grid to understand the pattern:", + "bbox": [ + 186, + 265, + 421, + 275 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Row 1: Contains line drawings or outlines (thin lines).", + "bbox": [ + 186, + 281, + 388, + 290 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Row 2: Contains solid black shapes (filled in).", + "bbox": [ + 186, + 290, + 357, + 297 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Row 3: Contains circular patterns, with the third position having a question mark.", + "bbox": [ + 186, + 297, + 491, + 306 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Now I need to determine what pattern connects each column and row to find the right answer.", + "bbox": [ + 186, + 311, + 540, + 321 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Looking at each column:", + "bbox": [ + 186, + 328, + 279, + 335 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Column 1: A line drawing at top, a solid shape in middle, and a circular pattern at bottom", + "bbox": [ + 186, + 335, + 524, + 344 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Column 2: Same structure as column 1", + "bbox": [ + 186, + 344, + 333, + 352 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Column 3: Same structure for the first two rows, but the third position has the question mark", + "bbox": [ + 186, + 352, + 537, + 361 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Now let's look more closely at the circular patterns in row 3:", + "bbox": [ + 186, + 367, + 410, + 375 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Position (3,1): Shows circles arranged in a pattern that seems to form two connected groups", + "bbox": [ + 186, + 375, + 531, + 383 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Position (3,2): Shows circles arranged in a different pattern, also with connected groups", + "bbox": [ + 186, + 383, + 514, + 391 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "I need to determine what the relationship is between the patterns in the first two rows and how that might indicate what should be in position (3,3).", + "bbox": [ + 186, + 397, + 733, + 407 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Looking at row 1 again:", + "bbox": [ + 186, + 414, + 276, + 422 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (1,1): Lines forming an angular shape", + "bbox": [ + 186, + 422, + 331, + 430 + ], + 
"page_idx": 49 + }, + { + "type": "text", + "text": "- (1,2): Lines forming another angular shape", + "bbox": [ + 186, + 430, + 352, + 436 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (1,3): Lines forming a third angular shape with an X", + "bbox": [ + 186, + 436, + 385, + 445 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Row 2:", + "bbox": [ + 186, + 452, + 212, + 459 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (2,1): Solid black angular shape", + "bbox": [ + 186, + 460, + 308, + 468 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (2,2): Solid black angular shape", + "bbox": [ + 186, + 468, + 308, + 476 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (2,3): Solid black angular shape", + "bbox": [ + 186, + 476, + 308, + 484 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "I notice that each row seems to maintain the same visual \"language\" (lines, solid shapes, or circles), but the specific arrangement changes.", + "bbox": [ + 186, + 491, + 694, + 501 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Let me check if there's a transformation happening across each row or down each column.", + "bbox": [ + 186, + 506, + 522, + 516 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "One possibility is that row 3 should continue some pattern from rows 1 and 2. Looking at the options:", + "bbox": [ + 186, + 522, + 563, + 532 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Option A: Circles arranged in a pattern", + "bbox": [ + 186, + 537, + 333, + 546 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Option B: Circles arranged in a pattern", + "bbox": [ + 186, + 547, + 333, + 556 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Option C: Circles arranged in a pattern", + "bbox": [ + 186, + 556, + 333, + 564 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Option D: Circles arranged in a pattern", + "bbox": [ + 186, + 564, + 333, + 571 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Looking purely at the row 3 pattern development:", + "bbox": [ + 186, + 577, + 372, + 585 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (3,1) has circles arranged in what appears to be a specific pattern", + "bbox": [ + 186, + 585, + 431, + 593 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (3,2) has circles arranged in a different pattern", + "bbox": [ + 186, + 593, + 364, + 601 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- (3,3) should continue this progression in some logical way", + "bbox": [ + 186, + 601, + 408, + 609 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Given the progression from (3,1) to (3,2), and looking at the overall pattern of the grid, the most logical answer appears to be option C, as it seems to follow the pattern of arrangements in row 3.", + "bbox": [ + 186, + 614, + 810, + 633 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "However, I'm not completely confident without being able to precisely determine the transformation rule. 
Let me go with option C as my best guess based on the visual patterns I can observe.", + "bbox": [ + 186, + 638, + 810, + 655 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 655, + 246, + 662 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "# Pattern Analysis for Figure Sequence", + "bbox": [ + 186, + 662, + 333, + 671 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "I need to identify the pattern in this $3 \\times 3$ grid to determine what should replace the question mark.", + "bbox": [ + 186, + 678, + 550, + 686 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Analyzing the rows:", + "bbox": [ + 186, + 694, + 274, + 702 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "1. **Row 1**: Contains simple line drawings (outlines)", + "bbox": [ + 186, + 702, + 388, + 710 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "2.**Row 2**: Contains solid black shapes (filled versions)", + "bbox": [ + 186, + 710, + 400, + 718 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "3.**Row $3^{**}$ : Contains arrangements of circles", + "bbox": [ + 186, + 718, + 359, + 726 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Analyzing the columns:", + "bbox": [ + 186, + 733, + 287, + 741 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Each column follows a consistent transformation pattern:", + "bbox": [ + 186, + 741, + 400, + 750 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- The top row shows a basic line pattern", + "bbox": [ + 186, + 750, + 338, + 756 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- The middle row shows the same pattern filled in black", + "bbox": [ + 186, + 757, + 395, + 763 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- The bottom row represents the same pattern using circles", + "bbox": [ + 186, + 765, + 405, + 772 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "## Looking at the third column:", + "bbox": [ + 186, + 779, + 307, + 787 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Position (1,3): A line pattern with an \"X\" and some angles", + "bbox": [ + 186, + 787, + 405, + 795 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Position (2,3): A solid version with square and triangular elements", + "bbox": [ + 186, + 795, + 437, + 803 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- Position (3,3): Should show the same pattern represented with circles", + "bbox": [ + 186, + 803, + 447, + 811 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Examining the options:", + "bbox": [ + 186, + 818, + 287, + 825 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- $^{**}$ Option A\\*\\*: Circles arranged in an L-shape with some outliers", + "bbox": [ + 186, + 825, + 429, + 834 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- $^{**}$ Option B\\*\\*: Circles arranged in a zigzag pattern", + "bbox": [ + 186, + 834, + 377, + 843 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- $^{**}$ Option C\\*: Circles arranged in a figure-8 or loop pattern with outlying circles", + "bbox": [ + 186, + 843, + 488, + 849 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "- $^{**}$ Option D\\*\\*: Circles arranged in a C-shape with additional elements", + "bbox": [ + 186, + 849, + 449, + 858 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Looking at how the circle arrangements in positions (3,1) and (3,2) correspond to their respective 
shapes above them, option C best represents the pattern in the third column. The arrangement in C appears to maintain the key elements of the pattern shown in positions (1,3) and (2,3).", + "bbox": [ + 186, + 864, + 810, + 882 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Answer: C", + "bbox": [ + 186, + 888, + 228, + 896 + ], + "page_idx": 49 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 49 + }, + { + "type": "text", + "text": "Spatial (Easy)", + "text_level": 1, + "bbox": [ + 439, + 112, + 549, + 128 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/80da2aa740657911acc5a646847ccea63c96b896cfdddfe6c9198ef82424b766.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 50 + }, + { + "type": "image", + "img_path": "images/904965bbf4265f24d2577a52604da12422da4d054e267cebc6d8b1a6743bfb7f.jpg", + "image_caption": [ + "Figure 38: A sample error case of Spatial Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 272, + 137, + 395, + 256 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Question: How many blocks are in this 3D tower?", + "bbox": [ + 447, + 172, + 746, + 186 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Options:A:7.B:8.C:9.D:10", + "bbox": [ + 450, + 186, + 625, + 199 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 449, + 210, + 562, + 224 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: I need to count the number of blocks in this 3D tower. Let me analyze the structure carefully.", + "bbox": [ + 184, + 261, + 689, + 272 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "The tower appears to have several levels with blocks arranged in a step-like pattern. I'll try to count methodically by identifying each visible block.", + "bbox": [ + 184, + 277, + 732, + 287 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Looking at the image:", + "bbox": [ + 184, + 292, + 269, + 301 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- At the bottom level (ground level), I can see 3 blocks arranged in an L-shape.", + "- At the second level up, I can see 2 blocks.", + "- At the top level, I can see 1 block." + ], + "bbox": [ + 186, + 301, + 477, + 325 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "So far that's $3 + 2 + 1 = 6$ blocks.", + "bbox": [ + 184, + 332, + 307, + 340 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "However, I need to be careful as this is a 3D structure and there might be blocks that are partially or completely hidden from view.", + "bbox": [ + 184, + 347, + 671, + 357 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Bottom level: I see what appears to be an L-shape with 3 visible cubes. 
There might be a 4th cube completing the square $(2\\times 2)$ , but it's not clearly visible.", + "bbox": [ + 184, + 363, + 751, + 372 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Middle level: I see 2 cubes.", + "bbox": [ + 186, + 372, + 285, + 378 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Top level: I see 1 cube.", + "bbox": [ + 186, + 378, + 271, + 386 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Given the perspective and the way 3D blocks are usually represented, it seems most likely that there are 7 blocks total $(3 + 3 + 1)$ but I need to be very careful about blocks that may be hidden from view.", + "bbox": [ + 186, + 386, + 810, + 402 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Actually, let me count one more time carefully:", + "bbox": [ + 186, + 410, + 361, + 417 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Bottom front left block", + "2. Bottom front right block", + "3. Bottom back left block (creating the L-shape)", + "4.Middle front block", + "5.Middle back block", + "6. Top block" + ], + "bbox": [ + 186, + 417, + 361, + 464 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "7. Bottom back right block (which would complete the $2 \\times 2$ square at the bottom) - this one is not clearly visible but would typically be present in a stable structure", + "bbox": [ + 184, + 464, + 789, + 473 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "That gives 7 blocks.", + "bbox": [ + 184, + 479, + 261, + 489 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "After careful consideration, I believe the answer is 7 blocks.", + "bbox": [ + 186, + 494, + 408, + 503 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Actually, I'm not confident about my count. Let me try to visualize this again:", + "bbox": [ + 184, + 510, + 475, + 521 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "The structure appears to be:", + "bbox": [ + 186, + 527, + 292, + 534 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "- Bottom layer: Looks like a $2 \\times 2$ arrangement (4 blocks)", + "bbox": [ + 186, + 534, + 393, + 542 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "- Middle layer: Appears to be 2 blocks", + "bbox": [ + 186, + 542, + 328, + 551 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "-Top layer:1 block", + "bbox": [ + 186, + 551, + 259, + 559 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "That would give us $4 + 2 + 1 = 7$ blocks total.", + "bbox": [ + 186, + 565, + 354, + 574 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "But wait, the image shows what looks like steps. Let me try to count each visible cube:", + "bbox": [ + 184, + 580, + 508, + 590 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "1. Bottom front left", + "bbox": [ + 186, + 590, + 258, + 597 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "2. Bottom front right", + "bbox": [ + 186, + 597, + 264, + 604 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "3.Middle left", + "bbox": [ + 186, + 604, + 240, + 612 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "4.Middle right", + "bbox": [ + 186, + 612, + 243, + 619 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "5. Top middle", + "bbox": [ + 186, + 619, + 240, + 628 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "6. 
Bottom back left (partially visible)", + "bbox": [ + 186, + 628, + 323, + 636 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "7. Bottom back right (might be hidden)", + "bbox": [ + 186, + 636, + 331, + 643 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Based on what I can see in the image, there appear to be 7 blocks, which corresponds to option (A).", + "bbox": [ + 186, + 643, + 553, + 652 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 652, + 248, + 660 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "# Counting Blocks in the 3D Tower", + "bbox": [ + 186, + 660, + 318, + 667 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "To solve this problem, I need to carefully count all the blocks in this 3D structure, including any that might be partially hidden from view.", + "bbox": [ + 184, + 674, + 697, + 683 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Let me work through this systematically by analyzing each level of the tower:", + "bbox": [ + 184, + 683, + 475, + 691 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Bottom Level (ground level):", + "bbox": [ + 186, + 698, + 307, + 705 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "I can see what appears to be a $2 \\times 2$ arrangement of blocks:", + "bbox": [ + 186, + 705, + 401, + 714 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "That's 4 blocks on the bottom level.", + "bbox": [ + 186, + 714, + 318, + 720 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Middle Level:", + "bbox": [ + 186, + 729, + 251, + 737 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Moving up one level, I can see:", + "bbox": [ + 186, + 737, + 302, + 744 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "- Middle left block (above the bottom front left)", + "bbox": [ + 186, + 744, + 362, + 752 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "- Middle right block (above the bottom back left)", + "bbox": [ + 186, + 752, + 367, + 760 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "That's 2 blocks on the middle level.", + "bbox": [ + 186, + 760, + 318, + 768 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Top Level:", + "bbox": [ + 186, + 775, + 238, + 782 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "At the very top, there is:", + "bbox": [ + 186, + 782, + 279, + 792 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "- One block positioned above the middle right block", + "bbox": [ + 186, + 792, + 380, + 800 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "That's 1 block on the top level.", + "bbox": [ + 186, + 800, + 300, + 808 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Total Count:", + "bbox": [ + 186, + 815, + 246, + 821 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Adding all levels: $4 + 2 + 1 = 7$ blocks total", + "bbox": [ + 186, + 821, + 344, + 830 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Therefore, there are 7 blocks in this 3D tower.", + "bbox": [ + 186, + 830, + 356, + 838 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Answer: A", + "bbox": [ + 186, + 845, + 228, + 854 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Error Category: Spatial / Orientation Error", + "bbox": [ + 184, + 859, + 439, + 873 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Error Reason: The model makes a spatial reasoning error by assuming only 3 blocks at the bottom 
level,", + "bbox": [ + 184, + 873, + 810, + 885 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "overlooking a likely 4th hidden cube that would complete the L-shape into a full $2 \\times 2$ square base.", + "bbox": [ + 186, + 885, + 754, + 898 + ], + "page_idx": 50 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 50 + }, + { + "type": "text", + "text": "Spatial (Easy)", + "text_level": 1, + "bbox": [ + 439, + 112, + 549, + 128 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/cefa652b64bca2cb3ded5a974bd666119b76399473475757d6e80f5058d67cca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 51 + }, + { + "type": "image", + "img_path": "images/1dca39e1bbec887164256f6110fefe845dd7ef508126048d7df6d8430b767d84.jpg", + "image_caption": [ + "Figure 39: A sample correct case of Spatial Reasoning (difficulty: Easy)." + ], + "image_footnote": [], + "bbox": [ + 183, + 141, + 455, + 200 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Question: Some kids are facing towards front and others are facing towards back. How many kids are holding another kid's hand with their right hand?", + "bbox": [ + 462, + 136, + 812, + 172 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Options: A: 2. B: 6. C: 5. D: 1", + "bbox": [ + 464, + 172, + 635, + 185 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "bbox": [ + 464, + 189, + 576, + 203 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let's analyze the image and the problem:", + "bbox": [ + 184, + 208, + 630, + 220 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "In the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward.", + "bbox": [ + 183, + 229, + 808, + 253 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "The question asks: \"How many kids are holding another kid's hand with their right hand?\"", + "bbox": [ + 184, + 263, + 669, + 275 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "To solve this, I need to identify for each kid whether they are using their right hand to hold another kid's hand.", + "bbox": [ + 184, + 275, + 774, + 287 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Going from left to right:", + "text_level": 1, + "bbox": [ + 184, + 297, + 316, + 309 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Kid 1 (blonde hair, purple dress): This kid appears to be facing forward. Her right hand would be on her right side from our perspective. She's using her right hand to hold the hand of Kid 2.", + "2. Kid 2 (gray hair): Appears to be facing forward. Using right hand to hold Kid 3's hand.", + "3. Kid 3 (green outfit): Appears to be facing forward. Using right hand to hold Kid 4's hand.", + "4. Kid 4 (black outfit): Appears to be facing backward (away from us). From Kid 4's perspective, their right hand would be on our left. So Kid 4 is using their right hand to hold Kid 3's hand.", + "5. Kid 5 (pink hair, yellow skirt): Appears to be facing backward. Using right hand to hold Kid 4's hand.", + "6. Kid 6 (light green/mint hair): Appears to be facing forward. Using right hand to not hold anyone (end of chain)." 
+ ], + "bbox": [ + 184, + 309, + 808, + 398 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "So, counting the kids who are using their right hand to hold another kid's hand:", + "bbox": [ + 184, + 407, + 609, + 420 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Kid 1: Yes", + "bbox": [ + 186, + 420, + 241, + 429 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Kid 2: Yes", + "bbox": [ + 186, + 431, + 241, + 441 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Kid 3: Yes", + "bbox": [ + 186, + 441, + 241, + 452 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Kid 4: Yes", + "bbox": [ + 186, + 452, + 241, + 463 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Kid 5: Yes", + "bbox": [ + 186, + 464, + 241, + 474 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Kid 6: No (end of chain)", + "bbox": [ + 186, + 474, + 315, + 487 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "That gives us 5 kids using their right hand to hold another kid's hand.", + "bbox": [ + 184, + 497, + 558, + 510 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "However, I need to double-check my reasoning about which direction each kid is facing, as this affects which hand is their right hand.", + "bbox": [ + 184, + 518, + 808, + 542 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Looking more carefully:", + "text_level": 1, + "bbox": [ + 184, + 553, + 316, + 564 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Kid 1 (blonde): Facing forward, right hand holding Kid 2's hand. Yes.", + "- Kid 2 (gray hair): Facing forward, right hand holding Kid 3's hand. Yes.", + "- Kid 3 (green outfit): Facing forward, right hand holding Kid 4's hand. Yes.", + "- Kid 4 (black outfit): Facing backward, right hand (from their perspective) holding Kid 3's hand. Yes.", + "- Kid 5 (pink hair): Facing backward, right hand holding Kid 4's hand. Yes.", + "- Kid 6 (mint hair): Facing forward, right hand not holding anyone's hand. No." + ], + "bbox": [ + 184, + 564, + 720, + 631 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "So, there are 5 kids holding another kid's hand with their right hand.", + "bbox": [ + 184, + 641, + 550, + 654 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "The answer should be (C) 5.", + "text_level": 1, + "bbox": [ + 184, + 664, + 334, + 675 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 186, + 676, + 272, + 686 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "I need to determine which kids are holding another kid's hand with their right hand by analyzing the image and understanding each child's orientation.", + "bbox": [ + 184, + 686, + 808, + 709 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Looking at the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward.", + "bbox": [ + 184, + 719, + 808, + 743 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Let me analyze each child from left to right:", + "bbox": [ + 184, + 753, + 419, + 765 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. First child (blonde girl in purple dress): Facing forward. She's using her right hand to hold the second child's hand.", + "2. Second child (gray hair): Facing forward. Using right hand to hold the third child's hand.", + "3. 
Third child (green outfit): Facing forward. Using right hand to hold the fourth child's hand.", + "4. Fourth child (black outfit): Facing backward. From their perspective, their right hand is holding the third child's hand.", + "5. Fifth child (pink hair in yellow skirt): Facing backward. Their right hand is holding the fourth child's hand.", + "6. Sixth child (light-colored hair): Facing forward. This child is at the end of the chain on the right, so their right hand is not holding anyone's hand." + ], + "bbox": [ + 184, + 775, + 808, + 864 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Counting the kids who are using their right hand to hold another kid's hand: 1st, 2nd, 3rd, 4th, and 5th kids = 5 children total.", + "bbox": [ + 184, + 875, + 808, + 897 + ], + "page_idx": 51 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 51 + }, + { + "type": "text", + "text": "Spatial (Medium)", + "text_level": 1, + "bbox": [ + 424, + 111, + 563, + 128 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/14b366f4185c02e93ebe4219a693c366bf81475a9410c98c0e1a41b887c3316c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 127 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/8e7a8efbb9442d3da6e11d1452e1c3d6dfed84fbbc0dca5848c6be76f2f510cd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 276, + 138, + 312, + 167 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/1639cb2aa2bb59d6aaebd4716fe3ca69f37c2ae86b1df24cdb81e1da99dfd062.jpg", + "image_caption": [ + "Figure 40: A sample error case of Spatial Reasoning (difficulty: Medium)." 
+ ], + "image_footnote": [], + "bbox": [ + 191, + 179, + 395, + 229 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/9b686a95436f9a02fa717ed03cad01e1e16f1c7cc446ebdb14615dd0b806dd7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 236, + 222, + 244 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/adf5076b567b494c09c346d232e7291ede55ef3f81babce364269d00e4cdb522.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 263, + 237, + 272, + 244 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/bea696fa24270a2378f3ea6791555d4d9e3504dd156e7e882d77f1f7fd86dcd8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 237, + 323, + 244 + ], + "page_idx": 52 + }, + { + "type": "image", + "img_path": "images/fd296ebae832def4298738b303e6e0ecdc616dbe78e9eb939acb6ded360c9ebd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 236, + 377, + 244 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Question: Which cube can be made based on the unfolded cube?", + "bbox": [ + 406, + 171, + 789, + 184 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Ground Truth: (A)", + "bbox": [ + 408, + 200, + 519, + 213 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: ", + "text_level": 1, + "bbox": [ + 186, + 250, + 344, + 258 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let me analyze the folded cube first to understand what I'm looking for:", + "- The cube appears to be mostly gold/yellow with black triangular patterns on some faces", + "- I can see 3 faces of the cube:", + "- Front face: Has a black triangular pattern pointing downward", + "- Top face: Has a black triangular pattern pointing to the right", + "-Right face: Has a black triangular pattern pointing to the left" + ], + "bbox": [ + 186, + 258, + 521, + 306 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Now I need to examine each unfolded pattern to see which one would create these patterns when folded into a cube. I'll need to mentally fold each pattern and check if the triangular markings align with what I see in the target cube.", + "bbox": [ + 184, + 311, + 810, + 330 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "For option (A):", + "text_level": 1, + "bbox": [ + 186, + 335, + 243, + 344 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "The unfolded pattern shows:", + "bbox": [ + 186, + 344, + 295, + 351 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A T-shape with triangular patterns on some faces", + "- When I mentally fold this, the triangular patterns don't seem to align with the target cube. The arrangement doesn't match what we see in the folded cube." + ], + "bbox": [ + 186, + 352, + 764, + 369 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "For option (B):", + "text_level": 1, + "bbox": [ + 186, + 375, + 241, + 383 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "The unfolded pattern has:", + "- A cross-like shape with triangular patterns", + "- When folded, this arrangement might create the patterns we see in the target cube. I need to check this more carefully." 
+ ], + "bbox": [ + 186, + 383, + 629, + 407 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "For option (C):", + "text_level": 1, + "bbox": [ + 186, + 414, + 241, + 422 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "This unfolded pattern has:", + "- An L-shape with additional squares and triangular patterns", + "- When mentally folded, the patterns don't seem to match the target cube." + ], + "bbox": [ + 186, + 422, + 460, + 446 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "For option (D):", + "text_level": 1, + "bbox": [ + 186, + 453, + 241, + 460 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "This unfolded pattern has:", + "- A cross-like shape with triangular patterns in different orientations", + "- When mentally folded, I need to check if this could create the target cube." + ], + "bbox": [ + 186, + 460, + 465, + 484 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Let me think about this more methodically. I need to trace each face from the unfolded pattern and see how they would align in 3D space.", + "bbox": [ + 184, + 491, + 697, + 500 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Looking at option (B) more carefully:", + "text_level": 1, + "bbox": [ + 186, + 507, + 325, + 515 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "If I consider the cross-shape as the standard cube net, with the center becoming the front face, then:", + "- The center face has a triangular pattern pointing downward, which matches the front face of the target cube", + "- The face to the right of center would become the right face, and it has a triangular pattern that would point left when folded, matching the target", + "- The face above the center would become the top face, and it has a triangular pattern that would point right when folded, matching the target", + "This seems to be a match for the folded cube shown at the top." 
+ ], + "bbox": [ + 186, + 516, + 728, + 554 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Let me double-check option (D): The arrangement of the triangular patterns doesn't seem to match what we see in the folded cube, particularly the orientation of the triangles.", + "bbox": [ + 184, + 561, + 810, + 578 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "I think option (B) is the correct answer.", + "text_level": 1, + "bbox": [ + 186, + 585, + 331, + 593 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 594, + 246, + 601 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Analyzing Which Unfolded Pattern Creates the Target Cube", + "bbox": [ + 186, + 601, + 418, + 609 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "I need to determine which of the unfolded cube patterns (a, b, c, d) would create the folded cube shown at the top when assembled.", + "bbox": [ + 186, + 616, + 674, + 625 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Examining the Target Cube", + "text_level": 1, + "bbox": [ + 186, + 631, + 302, + 640 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "First, I'll identify what's visible on the folded cube:", + "- Front face: Has a black triangular pattern pointing downward", + "- Top face: Has a black triangular pattern pointing to the right", + "- Right face: Has a black triangular pattern pointing to the left" + ], + "bbox": [ + 186, + 641, + 421, + 672 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Analyzing Each Unfolded Pattern", + "bbox": [ + 186, + 678, + 326, + 686 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Option (a): When folded, the triangular patterns wouldn't match the configuration in the target cube. The orientations and positions of the triangles don't align correctly.", + "bbox": [ + 184, + 694, + 810, + 710 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Option (b): If I mentally fold this pattern:", + "text_level": 1, + "bbox": [ + 186, + 717, + 357, + 724 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The central face (with downward-pointing triangle) would be the front face", + "- The face above it (with rightward-pointing triangle) would become the top face", + "- The face to the right of center (with leftward-pointing triangle) would become the right face", + "This perfectly matches the pattern seen in the target cube." + ], + "bbox": [ + 186, + 726, + 532, + 757 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Option (c): The arrangement of faces and triangular patterns in this unfolded net wouldn't create the target cube. 
The orientations of the triangles don't match when folded.", + "bbox": [ + 184, + 763, + 810, + 780 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Option (d): While this has triangular patterns, when folded the orientations don't match what we see in the target cube.", + "bbox": [ + 186, + 787, + 645, + 796 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Therefore, option (b) is the only unfolded pattern that would create the target cube when folded.", + "bbox": [ + 186, + 801, + 545, + 811 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Answer: B", + "bbox": [ + 186, + 818, + 227, + 825 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Error Category: Spatial / Orientation Error", + "text_level": 1, + "bbox": [ + 186, + 832, + 439, + 844 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Error Reason: The model incorrectly visualizes how the triangular patterns on the unfolded net align when folded into a cube. It selects option B, mistakenly believing the triangle orientations will match the folded view, but fails to correctly account for how the triangles rotate and face different directions when assembled.", + "bbox": [ + 184, + 845, + 810, + 892 + ], + "page_idx": 52 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 52 + }, + { + "type": "text", + "text": "Spatial (Medium)", + "text_level": 1, + "bbox": [ + 424, + 112, + 563, + 128 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/5173ddc5146d9cfeb2d74a7a9f9bb884fa2ce8df452523de82ced9c0974ae68a.jpg", + "image_caption": [ + "Figure 41: A sample correct case of Spatial Reasoning (difficulty: Medium)." + ], + "image_footnote": [], + "bbox": [ + 369, + 135, + 415, + 186 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/65301e5a16dcfa61b2c31b244def2e72eb53721f2d5f98a279f1ff866ed35b6e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 429, + 135, + 475, + 186 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/c8fb5f58b9700cf16ecc6cb3575f9894069c4c885cbea362069dec97779eacbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 135, + 539, + 186 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/10bcbe79e5c7c5d962a5e38863c039aae6fad5ad4603da5cacbc3e1b9c18cc88.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 553, + 135, + 581, + 186 + ], + "page_idx": 53 + }, + { + "type": "image", + "img_path": "images/2bbfec971459982369375ff4238e17715565acb6e3858aa51c607ea4534d78fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 598, + 135, + 619, + 186 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Question: Paulo took a rectangular sheet of paper, yellow on one side and green on the other side and, with several folds shown in the dotted lines in the figure below, made a little paper plane. To give the airplane a charm, Paulo made a circular hole, marked on the last figure. After playing a lot with the plane, Paulo unfolded the sheet and realized that there were several holes in it. How many holes did he count?", + "bbox": [ + 183, + 193, + 810, + 242 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Options: A: 6. B: 7. C: 8. 
D: 9", + "bbox": [ + 184, + 242, + 357, + 253 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "text_level": 1, + "bbox": [ + 184, + 257, + 297, + 270 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: Let me analyze this problem step by step.", + "bbox": [ + 184, + 273, + 500, + 284 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "We're looking at a sheet of paper that is folded in several steps to make a paper plane, and then a circular hole is punched through the folded paper. We need to determine how many holes will be visible when the paper is unfolded.", + "bbox": [ + 184, + 289, + 808, + 306 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "From the image, I can see:", + "bbox": [ + 184, + 311, + 285, + 321 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A rectangular yellow sheet of paper (yellow on one side, green on the other)", + "2. The paper is folded along dotted lines in several steps", + "3. In the final paper plane configuration, a circular blue hole is punched through" + ], + "bbox": [ + 186, + 321, + 485, + 345 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "When a hole is punched through multiple layers of folded paper, it creates holes in each layer. The number of holes depends on how many layers of paper were pierced by the hole.", + "bbox": [ + 184, + 351, + 808, + 369 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Looking at the folding process:", + "bbox": [ + 184, + 375, + 303, + 385 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Second image: Top portion folded to create a triangular shape (green side now showing on top)", + "- Third image: Side portions folded (more green showing)", + "- Fourth image: Further folding", + "- Fifth image: Final paper plane with a blue circular hole" + ], + "bbox": [ + 186, + 391, + 545, + 422 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "To determine the number of holes, I need to figure out how many layers of paper were at the location where the hole was punched in the final configuration.", + "bbox": [ + 184, + 429, + 767, + 439 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "It's a bit difficult to precisely determine from just these diagrams, but I can make a reasonable analysis:", + "bbox": [ + 184, + 444, + 568, + 454 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "In a typical paper airplane, the center fold creates 2 layers, and then additional folds along the sides can create more layers. The hole appears to be punched near the tip of one wing.", + "bbox": [ + 184, + 460, + 810, + 478 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "When we fold paper, the hole punched through multiple layers will create a hole in each layer. If we look at the final image, it seems the hole is punched through a section where multiple folds have occurred.", + "bbox": [ + 184, + 484, + 808, + 502 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Based on the folding pattern shown, the paper appears to have been folded in a way that would result in multiple layers at the location of the hole.", + "bbox": [ + 184, + 506, + 732, + 517 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Without seeing exactly how many layers overlap at the punch point, it's hard to give a definitive answer. 
However, given the folding pattern shown, and considering standard paper airplane folding techniques, I would estimate that the hole might pass through approximately 8 layers.", + "bbox": [ + 184, + 522, + 808, + 541 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "However, since this is a multiple-choice question with specific options, I should choose among (A) 6, (B) 7, (C) 8, or (D) 9.", + "bbox": [ + 184, + 545, + 635, + 556 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Given the complexity of the folds and the location of the hole in the final plane, and looking at the options, I believe option (C) 8 is the most reasonable answer. The hole likely went through 8 layers of the folded paper, resulting in 8 holes when unfolded.", + "bbox": [ + 184, + 561, + 810, + 579 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Actual, let me think about this more carefully. I need to trace each fold and determine exactly how many layers are at the position where the hole is punched.", + "bbox": [ + 184, + 584, + 774, + 594 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "The hole appears to be near one tip of the final paper plane. Based on the folding pattern:", + "bbox": [ + 184, + 601, + 519, + 609 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The initial fold creates a triangle on top (1 fold, 2 layers)", + "2. Then side folds create more layers", + "3. The final folds create the completed plane" + ], + "bbox": [ + 186, + 609, + 403, + 633 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Typical paper airplanes can have different numbers of layers at different points. The tip might have 8 layers if several folds pass through that area.", + "bbox": [ + 184, + 638, + 728, + 648 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Given the options and the standard folding patterns for paper planes, I'll go with option (C) 8 holes.", + "bbox": [ + 184, + 655, + 557, + 664 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 664, + 248, + 671 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Counting Holes in the Unfolded Paper", + "bbox": [ + 186, + 671, + 339, + 680 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "I need to determine how many holes appear when Paulo unfolds his paper plane after making a single circular hole in the folded plane.", + "bbox": [ + 184, + 686, + 689, + 696 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Analyzing the Folding Process", + "bbox": [ + 186, + 702, + 313, + 710 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Looking at the image sequence:", + "1. We start with a flat rectangular sheet that's yellow on one side and green on the other", + "2. The paper goes through several folding steps (shown by dotted lines)", + "3. In the final paper plane configuration, a single circular hole (blue) is punched through" + ], + "bbox": [ + 186, + 717, + 514, + 750 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Why Multiple Holes Appear", + "bbox": [ + 184, + 756, + 307, + 766 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "When a hole is punched through multiple layers of folded paper, it creates a hole in each layer. 
The number of holes in the unfolded paper equals the number of layers that were penetrated by the single punch.", + "bbox": [ + 184, + 771, + 808, + 789 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "## Counting the Layers", + "bbox": [ + 184, + 795, + 277, + 805 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Following the folding process carefully and looking at the final plane:", + "bbox": [ + 184, + 811, + 446, + 820 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The initial rectangular sheet is folded several times", + "- Each fold creates multiple overlapping layers", + "- The hole appears to be punched through the right wing of the completed plane" + ], + "bbox": [ + 186, + 820, + 485, + 843 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Based on the specific folding pattern shown in the diagrams, when the sheet is completely folded into the paper plane configuration, the location where the hole is punched consists of 8 layers of paper stacked together.", + "bbox": [ + 184, + 849, + 810, + 867 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Therefore, when Paulo unfolds the paper, he would find 8 holes in the sheet - one for each layer that was penetrated by the single punch.", + "bbox": [ + 184, + 872, + 694, + 882 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Answer: C", + "bbox": [ + 186, + 888, + 227, + 897 + ], + "page_idx": 53 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 53 + }, + { + "type": "text", + "text": "Spatial (Hard)", + "text_level": 1, + "bbox": [ + 437, + 112, + 550, + 128 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/a04ebb8a43502409b5fd77351daf18ccb85f6f642ae25c7224e556230afbcbc6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/14c6ef28685649387593825d78b70250342fe608769574e9273cfde733c2252b.jpg", + "image_caption": [ + "Figure 42: A sample error case of Spatial Reasoning (difficulty: Hard)." + ], + "image_footnote": [], + "bbox": [ + 282, + 140, + 377, + 185 + ], + "page_idx": 54 + }, + { + "type": "image", + "img_path": "images/aab8e64fbb9a68e5fd7bb86a766ebaf859645b89ced9f8bad955ead33c881c8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 388, + 143, + 705, + 181 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Question: Betty walked around the park once, starting from the marked point in the direction of the arrow. She took 4 pictures. In which order did she take the pictures?", + "bbox": [ + 184, + 196, + 810, + 222 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Options: A: 1234. B: 1324. C: 2143. D: 2341", + "bbox": [ + 184, + 223, + 433, + 234 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Ground Truth: (C)", + "text_level": 1, + "bbox": [ + 184, + 241, + 297, + 253 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: This problem requires me to determine the order in which Betty took 4 pictures as she walked around a park. 
I need to look at the diagram and figure out the sequence.", + "bbox": [ + 184, + 261, + 810, + 281 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "First, I'll analyze what I see in the leftmost diagram, which seems to show the park layout:", + "bbox": [ + 184, + 287, + 570, + 297 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- There's a cylindrical object (like a column or tree trunk)", + "- There's a cone-shaped object (like a pine tree or monument)", + "- There's a dome-shaped object (like a small hill or rock)", + "- There's an arrow indicating the direction Betty walked" + ], + "bbox": [ + 186, + 297, + 446, + 333 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Then I have 4 numbered pictures, each showing different arrangements of these objects:", + "- Picture 1: Shows the dome shape on the left and the cylinder on the right", + "- Picture 2: Shows the cone shape on the left and the cylinder on the right", + "- Picture 3: Shows the cylinder on the left and the dome shape on the right.", + "- Picture 4: Shows the cylinder on the left and both the dome shape and cone shape on the right" + ], + "bbox": [ + 186, + 342, + 591, + 387 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "I need to determine the order in which Betty would see these views as she walks around the park in the direction of the arrow.", + "bbox": [ + 184, + 395, + 722, + 405 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Looking at the main diagram, the arrow indicates she's walking counterclockwise around the park.", + "bbox": [ + 184, + 412, + 606, + 422 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "If I start at the marked point and move in the direction of the arrow (counterclockwise):", + "bbox": [ + 184, + 431, + 558, + 440 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Betty would first see the dome on her left and the cylinder on her right - this matches Picture 1", + "2. As she continues counterclockwise, she would next see the cone on her left and the cylinder on her right - this matches Picture 2", + "3. Continuing further, she would see the cylinder on her left and the dome on her right - this matches Picture 3", + "4. 
Finally, completing the circle, she would see the cylinder on her left with both the dome and cone on her right - this matches Picture 4" + ], + "bbox": [ + 186, + 441, + 761, + 474 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "So the order of the pictures would be 1, 2, 3, 4, which corresponds to option (A) 1234.", + "bbox": [ + 184, + 484, + 547, + 494 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Let me double-check this logic:", + "bbox": [ + 184, + 502, + 320, + 512 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "From the starting position and moving counterclockwise:", + "bbox": [ + 184, + 520, + 431, + 529 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "- First view: dome on left, cylinder on right (Picture 1)", + "bbox": [ + 186, + 529, + 416, + 537 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "- Second view: cone on left, cylinder on right (Picture 2)", + "bbox": [ + 186, + 539, + 423, + 547 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "- Third view: cylinder on left, dome on right (Picture 1)", + "bbox": [ + 186, + 549, + 413, + 556 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "- Fourth view: cylinder on left, dome and cone on right (Picture 4)", + "bbox": [ + 186, + 556, + 467, + 565 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "This confirms that the answer is (A) 1234.", + "bbox": [ + 186, + 573, + 362, + 582 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "", + "bbox": [ + 186, + 583, + 258, + 590 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "# Determining Picture Order in Betty's Park Walk", + "bbox": [ + 186, + 590, + 398, + 599 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "I need to figure out the order in which Betty took 4 pictures as she walked once around the park in the direction of the arrow.", + "bbox": [ + 184, + 608, + 715, + 618 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Analyzing the Park Layout", + "bbox": [ + 184, + 627, + 316, + 635 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "From the first diagram, I can see:", + "bbox": [ + 186, + 636, + 326, + 643 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A cylindrical object (looks like a column/tree trunk)", + "- A cone-shaped object (like a pointed tree or monument)", + "- A dome-shaped object (like a small hill or rock)", + "- An arrow indicating counterclockwise movement" + ], + "bbox": [ + 186, + 645, + 429, + 680 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Analyzing Each Picture", + "bbox": [ + 184, + 689, + 303, + 696 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Picture 1**: Shows the dome shape on left, cylindrical object on right", + "2. **Picture 2**: Shows the cone shape on left, cylindrical object on right", + "3. **Picture 3**: Shows the cylindrical object on left, dome shape on right", + "4. **Picture 4**: Shows the cylindrical object on left, both dome and cone shapes on right" + ], + "bbox": [ + 186, + 698, + 560, + 733 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Determining the Sequence", + "bbox": [ + 184, + 742, + 315, + 751 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Starting from the marked point and moving counterclockwise (following the arrow):", + "1. 
First, Betty would see the dome on her left and the cylinder on her right $\\rightarrow^{**}$ Picture $1^{**}$", + "2. Continuing counterclockwise, she would next see the cone on her left and the cylinder on her right $\\rightarrow^{**}$ Picture $2^{**}$", + "3. Moving further, she would see the cylinder on her left and the dome on her right $\\rightarrow^{**}$ Picture 3\\*\\*", + "4. Finally, she would see the cylinder on her left with both the dome and cone on her right $\\rightarrow^{**}$ Picture $4^{**}$" + ], + "bbox": [ + 186, + 752, + 681, + 795 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "This gives us the sequence 1-2-3-4.", + "bbox": [ + 184, + 804, + 333, + 814 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Answer: A", + "bbox": [ + 186, + 821, + 233, + 830 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Error Category: Spatial / Orientation Error", + "bbox": [ + 184, + 835, + 439, + 848 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Error Reason: The model makes a spatial orientation error by incorrectly interpreting the shifting perspectives as Betty walks around the park, especially at the starting point. It assumes a consistent viewpoint but fails to mentally rotate the scene to match what Betty would actually see from each new position.", + "bbox": [ + 184, + 849, + 810, + 898 + ], + "page_idx": 54 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 54 + }, + { + "type": "text", + "text": "Spatial (Hard)", + "text_level": 1, + "bbox": [ + 437, + 112, + 550, + 128 + ], + "page_idx": 55 + }, + { + "type": "image", + "img_path": "images/2586382fa16ad27a1d1de419769a9996fc1643298d2473d0ee193d5899f640c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 593, + 112, + 612, + 126 + ], + "page_idx": 55 + }, + { + "type": "image", + "img_path": "images/6e1c6e63d19fe7d1bf2557d0f987c2866548f050e00b0b561dc19978100bddba.jpg", + "image_caption": [ + "Figure 43: A sample correct case of Spatial Reasoning (difficulty: Hard)." 
+ ], + "image_footnote": [], + "bbox": [ + 187, + 145, + 251, + 170 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "A", + "bbox": [ + 215, + 179, + 223, + 185 + ], + "page_idx": 55 + }, + { + "type": "image", + "img_path": "images/b7644b24bb6cc17803106504397075a33326ffdc8d6e7a7d2ceb81edbe695538.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 272, + 140, + 326, + 172 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "B", + "bbox": [ + 295, + 178, + 303, + 185 + ], + "page_idx": 55 + }, + { + "type": "image", + "img_path": "images/988fe318a4b806c51614855dc118b5905c5628e9d1a730257062c2d4125fef59.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 140, + 395, + 172 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "C", + "bbox": [ + 364, + 178, + 372, + 185 + ], + "page_idx": 55 + }, + { + "type": "image", + "img_path": "images/b762364e4fbdadb57bcfa6d9c604c9302b6522c57bd98dcd1c60d88d2028f45b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 415, + 140, + 468, + 172 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "D", + "bbox": [ + 439, + 178, + 447, + 185 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Question: From the flat expansion view of paper boxes, which paper box is different from the other three?", + "bbox": [ + 478, + 141, + 810, + 167 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Ground Truth: (A)", + "bbox": [ + 478, + 175, + 591, + 189 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Claude-3.7-Sonnet-Thinking: This problem is asking me to identify which one of the four flat expansions of paper boxes is different from the others. Let me analyze each one carefully.", + "bbox": [ + 183, + 202, + 810, + 220 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "These patterns represent unfolded 3D boxes (nets), where the squares would fold to create a 3D shape. The black squares and circles represent some kind of marking or pattern on the surface of the box.", + "bbox": [ + 183, + 228, + 808, + 247 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Let me analyze the structure of each expansion:", + "bbox": [ + 184, + 255, + 390, + 265 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: This appears to be a linear arrangement of 5 squares with a T-shape extension. The pattern has black regions in various squares.", + "B: This is a more complex shape with 7 squares arranged in a cross-like pattern. It has black regions in various squares.", + "C: This also appears to be an arrangement of squares that could fold into a box, with black markings.", + "D: This is another cross-shaped arrangement of squares with black markings." 
+ ], + "bbox": [ + 184, + 265, + 740, + 301 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "To determine which one is different, I need to see if they would all fold into valid 3D boxes and if there's any pattern that sets one apart.", + "bbox": [ + 184, + 308, + 761, + 319 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Let me think about how these would fold:", + "- A cube has 6 faces, so a valid net should have exactly 6 squares", + "- The squares must be connected in a way that allows folding into a cube" + ], + "bbox": [ + 184, + 327, + 496, + 354 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Let me count the squares in each:", + "bbox": [ + 184, + 362, + 328, + 371 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: I count 5 squares", + "B: I count 7 squares", + "C: I count 7 squares", + "D:1 count 7 squares" + ], + "bbox": [ + 184, + 371, + 271, + 407 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Looking at the number of squares, A has 5 squares while B, C, and D each have 7 squares. A cube needs 6 faces, so none of these would fold into a perfect cube. A is different in that it has fewer squares than the others.", + "bbox": [ + 183, + 415, + 808, + 435 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "However, let me look more carefully at these patterns to see if there's another difference.", + "bbox": [ + 184, + 441, + 562, + 452 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Actually, let me recount:", + "bbox": [ + 184, + 460, + 292, + 469 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: Looking more carefully, I see 6 squares arranged in a T-pattern with an extension", + "B: I see 7 squares arranged in a cross-pattern with extensions", + "C: I see 7 squares arranged in a pattern", + "D: I see 7 squares arranged in a cross-pattern" + ], + "bbox": [ + 184, + 469, + 542, + 506 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "So option A has 6 squares, while B, C, and D each have 7 squares. Since a cube has 6 faces, A is the only one that could potentially fold into a proper cube. The others have an extra square.", + "bbox": [ + 183, + 513, + 808, + 532 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Let me check once more carefully:", + "bbox": [ + 184, + 540, + 333, + 549 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A: I count squares from left to right and top to bottom: 1, 2, 3, 4, 5, 6. So there are 6 squares.", + "B: I count 7 squares", + "C: I count 7 squares.", + "D: I count 7 squares." + ], + "bbox": [ + 184, + 549, + 571, + 585 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "So based on the number of squares, A is different from the others because it has 6 squares while the others have 7 squares.", + "bbox": [ + 183, + 593, + 702, + 604 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "But let me think about whether there could be another difference that's more subtle, like the pattern of black regions or the connectivity of the squares.", + "bbox": [ + 183, + 609, + 808, + 630 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Looking at the patterns of black and white regions, they all seem to have black regions and circles in different arrangements. 
Looking at the connectivity of the squares, they all have different patterns of how the squares connect.", + "bbox": [ + 184, + 637, + 712, + 657 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "I think the key difference is the number of squares. A has 6 squares which is the exact number needed for a cube. B, C, and D all have 7 squares, which is one too many for a standard cube - these would need to have one square overlapping or not used when folded.", + "bbox": [ + 184, + 664, + 808, + 684 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "So my answer is A.", + "bbox": [ + 184, + 691, + 269, + 700 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "", + "bbox": [ + 184, + 700, + 254, + 709 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Analysis of Paper Box Flat Expansions", + "bbox": [ + 184, + 709, + 359, + 719 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "I need to determine which of these flat expansions (nets) is different from the others by examining their structure and properties.", + "bbox": [ + 184, + 726, + 732, + 737 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "First, let me count the number of squares in each option:", + "- Option A: 6 squares arranged in an L-shape with an extension", + "- Option B: 7 squares arranged in a cross pattern with extensions", + "- Option C: 7 squares arranged in a T-shape with extensions", + "- Option D: 7 squares arranged in a cross pattern with extensions" + ], + "bbox": [ + 184, + 744, + 462, + 790 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "The key difference I notice is that option A has exactly 6 squares, while all the other options (B, C, and D) have 7 squares each.", + "bbox": [ + 184, + 797, + 720, + 808 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "This is significant because a cube has exactly 6 faces, so a proper net for a cube should have exactly 6 squares. Options B, C, and D all have one extra square, which means they would either:", + "bbox": [ + 183, + 815, + 808, + 834 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Not fold into a proper cube", + "2. Have an overlapping square when folded", + "3. 
Require leaving one square unused" + ], + "bbox": [ + 184, + 834, + 374, + 861 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Therefore, option A is different from the other three because it's the only one with the correct number of squares (6) to form a standard cube.", + "bbox": [ + 184, + 868, + 779, + 878 + ], + "page_idx": 55 + }, + { + "type": "text", + "text": "Answer: A", + "bbox": [ + 184, + 887, + 233, + 896 + ], + "page_idx": 55 + }, + { + "type": "header", + "text": "Submission In Progress", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 55 + }, + { + "type": "page_number", + "text": "56", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 55 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_model.json b/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..86c2d6c9d6e91c4216629db6ae41714dcc41d043 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_model.json @@ -0,0 +1,25876 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.83, + 0.143 + ], + "angle": 0, + "content": "VISUALPUZZLES: Decoupling Multimodal Reasoning Evaluation from Domain Knowledge" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.167, + 0.778, + 0.198 + ], + "angle": 0, + "content": "Yueqi Song\\*, Tianyue Ou\\*, Yibo Kong†, Zecheng Li†, Graham Neubig, Xiang Yue {yueqis, tianyueo, gneubig, xyue2}@cs.cmu.edu" + }, + { + "type": "text", + "bbox": [ + 0.395, + 0.208, + 0.621, + 0.226 + ], + "angle": 0, + "content": "Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.338, + 0.237, + 0.661, + 0.253 + ], + "angle": 0, + "content": "https://neulab.github.io/VisualPuzzles/" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.267, + 0.542, + 0.284 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.303, + 0.771, + 0.626 + ], + "angle": 0, + "content": "Current multimodal benchmarks often conflate reasoning with domain-specific knowledge, making it difficult to isolate and evaluate general reasoning abilities in non-expert settings. To address this, we introduce VISUALPUZZLES, a benchmark that targets visual reasoning while deliberately minimizing reliance on specialized knowledge. VISUALPUZZLES consists of diverse questions spanning five categories: algorithmic, analogical, deductive, inductive, and spatial reasoning. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination. Experiments show that VISUALPUZZLES requires significantly less intensive domain-specific knowledge and more complex reasoning compared to benchmarks like MMMU, enabling us to better evaluate genuine multimodal reasoning. Evaluations show that state-of-the-art multimodal large language models consistently lag behind human performance on VISUALPUZZLES, and that strong performance on knowledge-intensive benchmarks does not necessarily translate to success on reasoning-focused, knowledge-light tasks. Additionally, reasoning enhancements such as scaling up inference compute (with \"thinking\" modes) yield inconsistent gains across models and task types, and we observe no clear correlation between model size and performance. 
We also found that models exhibit different reasoning and answering patterns on VISUALPUZZLES compared to benchmarks with heavier emphasis on knowledge. VISUALPUZZLES offers a clearer lens through which to evaluate reasoning capabilities beyond factual recall and domain knowledge." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.647, + 0.825, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.79, + 0.828, + 0.877 + ], + "angle": 0, + "content": "Figure 1: Model accuracy on VISUALPUZZLES compared to human performance percentiles. All evaluated models fall below the human 5th percentile (57.5%), highlighting the difficulty of VISUALPUZZLES. Interestingly, models with explicit \"thinking\" modes do not consistently outperform their base versions, suggesting that current reasoning strategies do not yet generalize well to VISUALPUZZLES's scenarios, even though these strategies have proven effective in existing reasoning tasks that often rely heavily on domain-specific knowledge." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.266, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.10342v3 [cs.CL] 30 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.338, + 0.911 + ], + "angle": 0, + "content": "*Equal Contributions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.338, + 0.925 + ], + "angle": 0, + "content": "Equal Contributions." + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.897, + 0.338, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.391, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.102, + 0.608, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.102, + 0.824, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.226, + 0.498, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.225, + 0.824, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.355, + 0.78, + 0.373 + ], + "angle": 0, + "content": "Figure 2: Example VISUALPUZZLES instances within each reasoning category" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.402, + 0.321, + 0.418 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.451, + 0.828, + 0.55 + ], + "angle": 0, + "content": "Reasoning is a cornerstone of both human and artificial intelligence, enabling systems to solve problems, draw inferences, and make decisions from information. Recent advances in multimodal large language models (MLLMs) (OpenAI, 2024; Liu et al., 2023a; Li et al., 2024; Dubey et al., 2024; Qwen Team, 2025a; Yue et al., 2025) exhibit early signs of reasoning in tackling complex tasks such as answering expert-level visual questions (Yue et al., 2024a;b), interpreting scientific diagrams (Roberts et al., 2024), and solving challenging math word problems (Lu et al., 2023)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.556, + 0.829, + 0.697 + ], + "angle": 0, + "content": "Many of the tasks mentioned above are inherently knowledge-intensive; large amounts of knowledge in domains such as science or math are necessary to answer questions correctly (Yue et al., 2024a). However, in reality, reasoning does not necessitate knowledge. Even non-expert humans can successfully solve logic puzzles, spatial reasoning problems, and analogical tasks using general inferential skills, without requiring deep domain expertise. This raises an important question: Can we measure MLLMs's reasoning ability independently of measuring their acquisition of domain-specific knowledge? This question is particularly important with the recent rapid development of reasoning models in the textual domain (Jaech et al., 2024; DeepSeek-AI, 2025; Qwen Team, 2025b), and emerging application to the visual domain (Qwen Team, 2024)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.702, + 0.829, + 0.926 + ], + "angle": 0, + "content": "To address this question, we introduce VISUALPUZZLES, a multimodal benchmark explicitly crafted to assess reasoning capabilities independent of specialized knowledge. VISUALPUZZLES comprises 1,168 carefully curated puzzle-like questions that span five distinct categories of reasoning: algorithmic, analogical, deductive, inductive, and spatial, each annotated with varying difficulty levels. VISUALPUZZLES only requires basic common knowledge and the information presented in the question to solve problems, disentangling reasoning from domain-specific knowledge recall. Our experiments show that VISUALPUZZLES requires significantly fewer domain-specific knowledge concepts compared to benchmarks like MMMU, and models have sufficient knowledge required to solve VISUALPUZZLES questions, enabling us to better assess multimodal reasoning versus pretrained factual knowledge. While VISUALPUZZLES minimizes reliance on domain expertise, its reasoning complexity exceeds that of existing benchmarks: in VISUALPUZZLES, \\(82.1\\%\\) of models' solution steps are logical reasoning steps, compared to \\(71.5\\%\\) in MMMU. Additionally, no current MLLM surpasses even the 5th-percentile human performance, highlighting the benchmark's difficulty and the limitations of today's models in general-purpose visual reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.828, + 0.149 + ], + "angle": 0, + "content": "Our experiments with VISUALPUZZLES reveal critical limitations in current MLLMs' multimodal reasoning ability by factoring out domain-specific knowledge requirements and only focusing on reasoning. Specifically, we uncover four key findings:" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.16, + 0.826, + 0.218 + ], + "angle": 0, + "content": "- Strong performance on knowledge-heavy benchmarks does not transfer well. Models that rank highly on MathVista and MMMU often experience substantial performance drops on VISUALPUZZLES, highlighting a disconnect between knowledge-rich and knowledge-light multimodal reasoning tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.174, + 0.22, + 0.827, + 0.277 + ], + "angle": 0, + "content": "- Humans outperform models on easy and medium tasks, while both degrade on harder ones. Human participants show strong and consistent performance on easy and medium-level questions across reasoning categories. In contrast, models struggle even on simpler tasks." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.28, + 0.826, + 0.338 + ], + "angle": 0, + "content": "- Reasoning enhancements (e.g., long CoT and \"thinking\" mode) yield inconsistent gains. While explicit reasoning strategies help certain models tackle complex reasoning tasks, these techniques do not consistently improve performance across all model families and task types." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.341, + 0.826, + 0.398 + ], + "angle": 0, + "content": "- Scaling model size does not ensure stronger reasoning. We observe no clear trend indicating that larger models outperform smaller ones on VISUALPUZZLES, suggesting that scaling up parameters alone is insufficient to improve domain-agnostic multimodal reasoning." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.16, + 0.827, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.418, + 0.357, + 0.433 + ], + "angle": 0, + "content": "2 VISUALPUZZLES" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.45, + 0.615, + 0.466 + ], + "angle": 0, + "content": "2.1 Motivation and Design Principles of VISUALPUZZLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.476, + 0.827, + 0.506 + ], + "angle": 0, + "content": "Existing benchmarks often conflate multimodal reasoning with domain-specific knowledge, making it difficult to isolate and measure the pure reasoning capabilities of these models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.511, + 0.828, + 0.597 + ], + "angle": 0, + "content": "VISUALPUZZLES is designed to explicitly address this issue by providing a testbed focused on evaluating multimodal reasoning in isolation from specialized knowledge. Specifically, VISUALPUZZLES centers on puzzle-like questions that rely solely on the provided image, question text, and basic common-sense reasoning. The core design principle behind VISUALPUZZLES is to limit the need for external or pretrained domain knowledge. Figure 2 shows examples of VISUALPUZZLES within each reasoning category." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.612, + 0.433, + 0.627 + ], + "angle": 0, + "content": "2.2 Data Collection and Curation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.638, + 0.827, + 0.682 + ], + "angle": 0, + "content": "We curated VISUALPUZZLES using a multi-stage pipeline. The process involved sourcing, adapting, and validating questions with an emphasis on reasoning quality and minimal reliance on specialized knowledge." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.687, + 0.827, + 0.789 + ], + "angle": 0, + "content": "Question Sourcing. We collected questions from three primary sources: (1) online resources and textbooks focused on logical, visual, and spatial puzzles, (2) synthesized items using images from large-scale vision datasets paired with text prompts, and (3) carefully repurposed items from existing multimodal reasoning benchmarks. Each source was selected to ensure a wide variety of reasoning challenges while avoiding trivial or fact-heavy questions. 
One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination1. Other sources are listed in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.827, + 0.851 + ], + "angle": 0, + "content": "Format Adaptation. All collected items were adapted into a consistent multiple-choice format with four options, balancing between text-based and image-based answer choices. This modality balance allows us to better test models' abilities to perform reasoning across diverse formats." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.829, + 0.902 + ], + "angle": 0, + "content": "Data Validation. During curation, we applied strict filtering criteria to eliminate questions requiring advanced mathematical knowledge, specialized domain knowledge and facts. Questions were retained only if they could be solved using information present in the image," + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.909, + 0.757, + 0.926 + ], + "angle": 0, + "content": "\\(^{1}\\) Chinese Civil Service Examination (Logic Test), 中国国家公务员考试行测(逻辑推理)" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "the question prompt, and basic common sense. A multi-round validation process was conducted by human annotators, focusing on question clarity, solvability, and reasoning type classification." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.153, + 0.785, + 0.17 + ], + "angle": 0, + "content": "Attribute Annotation. Finally, each question was annotated with two key attributes:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.177, + 0.827, + 0.276 + ], + "angle": 0, + "content": "- Reasoning Category: Each item was categorized as algorithmic, analogical, deductive, inductive, or spatial reasoning. These five categories were selected as they represent fundamental forms of reasoning widely discussed in literature (Liu et al., 2020; Lu et al., 2023; Yue et al., 2024a; Gao et al., 2023). At the same time, we aimed to balance comprehensiveness with conciseness, avoiding an overly fine-grained taxonomy that could dilute the benchmark's clarity and usability. This categorization ensures that VISUALPUZZLES covers a broad yet manageable set of reasoning skills relevant to multimodal LLM evaluation." + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.278, + 0.7, + 0.293 + ], + "angle": 0, + "content": "- Algorithmic Reasoning involves reasoning over algorithmic rules." + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.293, + 0.826, + 0.309 + ], + "angle": 0, + "content": "- Analogical Reasoning requires analyzing the relationships between a pair of entities." + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.309, + 0.826, + 0.324 + ], + "angle": 0, + "content": "- Deductive Reasoning involves logically drawing conclusions from known premises." + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.324, + 0.765, + 0.339 + ], + "angle": 0, + "content": "- Inductive Reasoning focuses on generalizing rules from observed patterns." + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.34, + 0.794, + 0.355 + ], + "angle": 0, + "content": "- Spatial Reasoning requires interpreting and manipulating spatial relationships." 
+ }, + { + "type": "list", + "bbox": [ + 0.205, + 0.278, + 0.826, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.357, + 0.825, + 0.387 + ], + "angle": 0, + "content": "- Difficulty Level: Labeled as easy, medium, or hard, based on annotators' estimated cognitive load and time-to-solve metrics." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.395, + 0.825, + 0.425 + ], + "angle": 0, + "content": "This pipeline ensures that VISUALPUZZLES presents a diverse set of high-quality questions designed to challenge multimodal LLMs on their reasoning abilities without involving" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.425, + 0.4, + 0.439 + ], + "angle": 0, + "content": "pretrained domain knowledge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.454, + 0.341, + 0.468 + ], + "angle": 0, + "content": "2.3 Dataset Statistics" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.51, + 0.565 + ], + "angle": 0, + "content": "VISUALPUZZLES comprises 1,168 multimodal reasoning puzzles. It is designed to provide a balanced distribution across different reasoning categories, difficulty levels, and option formats for comprehensive evaluation. The statistics of VISUALPUZZLES are shown in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.571, + 0.51, + 0.613 + ], + "angle": 0, + "content": "Across the five reasoning types, we maintain a roughly even distribution, ensuring that no single reasoning style dominates the benchmark." + }, + { + "type": "table", + "bbox": [ + 0.524, + 0.433, + 0.822, + 0.584 + ], + "angle": 0, + "content": "
CategoryStatistics
Total Questions1168
- Algorithmic Reasoning262
- Analogical Reasoning211
- Deductive Reasoning200
- Inductive Reasoning209
- Spatial Reasoning286
Easy/Medium/Hard46%/39%/15%
Option Type (Image/Text)57%/43%
AVG. Question Length154.9
% Easy Words54%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.536, + 0.586, + 0.807, + 0.6 + ], + "angle": 0, + "content": "Table 1: Statistics of VISUALPUZZLES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.613, + 0.825, + 0.67 + ], + "angle": 0, + "content": "Similarly, we balanced the dataset across the three difficulty levels (easy, medium, hard) to capture a wide spectrum of cognitive demands. Approximately half of the answer choices in the dataset are image-based and the other half are text-based, enabling evaluation of models' abilities to reason across diverse query formats." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.675, + 0.825, + 0.735 + ], + "angle": 0, + "content": "In terms of language complexity, VISUALPUZZLES was constructed with an emphasis on accessibility. Most of the question text uses Basic English vocabulary2 to minimize the impact of linguistic complexity on reasoning performance, focusing the evaluation strictly on multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.74, + 0.825, + 0.783 + ], + "angle": 0, + "content": "Compared to prior benchmarks, VISUALPUZZLES is unique in that it explicitly minimizes domain-specific knowledge requirements while maintaining high reasoning complexity. We demonstrate these traits of VISUALPUZZLES in Section 5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.802, + 0.428, + 0.82 + ], + "angle": 0, + "content": "3 Experiments and Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.833, + 0.363, + 0.85 + ], + "angle": 0, + "content": "3.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.859, + 0.827, + 0.903 + ], + "angle": 0, + "content": "We comprehensively evaluated the reasoning abilities of a variety of MLLMs on VISUALPUZZLES. Additionally, we performed human evaluations to better understand the gap between human and models' reasoning capabilities." + }, + { + "type": "page_footnote", + "bbox": [ + 0.19, + 0.91, + 0.66, + 0.924 + ], + "angle": 0, + "content": "2https://en.wiktionary.org/wiki/Appendix:Basic_English_word_list" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "We selected a diverse set of proprietary and open MLLMs to ensure broad coverage in terms of model architecture, training scale, and intended application domains. This diversity allows us to capture a wide spectrum of current approaches and capabilities in the field. We integrated VISUALPUZZLES into Lmms-eval (Li* et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.167, + 0.827, + 0.266 + ], + "angle": 0, + "content": "Proprietary Models. We evaluate several leading proprietary models that represent the current state of the art: (1) GPT-4o, o1, o3, and o4-mini (OpenAI, 2024; Jaech et al., 2024); (2) Gemini-1.5-Pro, Gemini-2.0-Flash, Gemini-2.0-Flash-Thinking, and Gemini-2.5-Pro (Gemini et al., 2023); (3) Claude-3.5-Sonnet and Claude-3.7-Sonnet (Anthropic, 2022). Among these, o1, o3, o4-mini are explicitly optimized for reasoning, while Gemini-2.0-Flash-Thinking and Claude-3.7-Sonnet incorporate dedicated modules for extensive step-by-step problem-solving." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.271, + 0.828, + 0.37 + ], + "angle": 0, + "content": "Open Models. We further evaluate widely used open MLLMs to gauge how open models compare against proprietary models: (1) LLaVA Series (Liu et al., 2023a; 2024a; Li et al., 2024): LLaVA-1.5 (7B/13B), LLaVA-1.6 (7B/13B/34B), and LLaVA-OV (0.5B/7B/72B); (2) Llama-3.2-Vision-Instruct (11B/90B) (Dubey et al., 2024); (3) Qwen-VL Series (Bai et al., 2024; Yang et al., 2024; Qwen Team, 2025a; 2024): including Qwen-VL, Qwen2-VL (2B/7B/72B-Instruct), Qwen2.5-VL (3B/7B/72B-Instruct), and QvQ-72B-Preview; (4) Cambrian (8B/13B) (Tong et al., 2024); (5) Pangea-7B (Yue et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.376, + 0.825, + 0.435 + ], + "angle": 0, + "content": "We apply both direct multiple-choice prompting and Chain-of-Thought (CoT) prompting to each model, following recent findings that CoT can significantly enhance model reasoning on complex multimodal tasks. For each model we report the best performance, whether achieved by direct multiple-choice prompting or CoT prompting." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.439, + 0.827, + 0.565 + ], + "angle": 0, + "content": "Human Performance. To establish a strong baseline for comparison, we conducted human evaluations with 70 college-level volunteers. Human performance provides a valuable upper-bound reference for assessing the current capabilities and limitations of multimodal reasoning models. While this serves as a benchmark for present-day systems, it is possible that future models could surpass this level of performance. Each participant was randomly assigned a subset of the puzzles and completed them under the same resource-constrained conditions as the models (i.e., without access to external tools or the internet). On average, participants completed each puzzle in 78 seconds, reflecting the typical cognitive load and time demands imposed by VISUALPUZZLES." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.581, + 0.33, + 0.595 + ], + "angle": 0, + "content": "3.2 Overall Results" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.607, + 0.825, + 0.693 + ], + "angle": 0, + "content": "Table 2 and Figure 1 compare the performance of humans and a selected set of models.3 All evaluated models, even the proprietary ones, perform below the 4th percentile of human accuracy, underscoring the significant gap in multimodal reasoning abilities. These results reinforce our finding that, although models have made progress in multimodal understanding, there remains a substantial margin for improvement before they can match or surpass human performance on multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.698, + 0.825, + 0.783 + ], + "angle": 0, + "content": "This pattern holds across categories as well. In Table 2, top human participants (95th percentile) exhibit near-perfect accuracy on multiple reasoning categories, while model performance remains substantially lower, even lower than the worst human performance (5th percentile). These results emphasize the need for continued innovation in model architectures and training paradigms if we aim to close the gap between model and human intelligence on complex multimodal reasoning." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.801, + 0.669, + 0.82 + ], + "angle": 0, + "content": "4 Disentangling Reasoning from Domain Knowledge" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.833, + 0.521, + 0.849 + ], + "angle": 0, + "content": "4.1 Knowledge Intensity of VISUALPUZZLES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.859, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Is VISUALPUZZLES less knowledge-intensive than existing reasoning benchmarks? This question is central to our goal of disentangling reasoning ability from domain-specific" + }, + { + "type": "page_footnote", + "bbox": [ + 0.17, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "3Full results for every model discussed in Section 3 are provided in Appendix D, including separate performance outcomes for both direct multiple-choice and CoT prompting." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.101, + 0.829, + 0.463 + ], + "angle": 0, + "content": "
ModelAlgorithmsAnalogicalDeductiveInductiveSpatialOverall
Random Choice25.025.025.025.025.025.0
Human (95th Percentile)100.0100.0100.081.6100.089.3
Human (50th Percentile)88.066.080.050.090.075.0
Human (5th Percentile)68.125.037.00.059.157.5
Proprietary Models
GPT-4o49.258.349.027.326.241.3
o163.768.367.529.234.351.8
o364.568.369.527.342.754.0
o4-mini65.368.775.533.045.557.0
Gemini-2.0-flash55.358.857.024.431.845.0
Gemini-2.0-flash-thinking46.670.149.024.925.542.2
Gemini-2.5-pro60.064.060.029.736.449.5
Claude-3.7-Sonnet64.548.365.026.837.448.3
Claude-3.7-Sonnet-Thinking67.244.161.531.137.148.2
Open Models (Qwen-Based)
LLaVA-OV-7B27.528.040.524.428.029.4
Pangea-7B32.423.738.528.732.531.3
Qwen2.5-VL-7B-Instruct38.223.751.524.931.133.7
LLaVA-OV-72B34.726.537.027.328.730.8
QvQ-72B-Preview44.843.644.026.830.837.8
Qwen2.5-VL-72B-Instruct53.446.958.025.829.542.3
Open Models (Llama-Based)
Cambrian-8B31.324.236.024.029.028.9
Llama-3.2-11B-Vision-Instruct31.030.839.021.126.229.4
Llama-3.2-90B-Vision-Instruct45.023.243.026.331.534.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.473, + 0.825, + 0.522 + ], + "angle": 0, + "content": "Table 2: Performance (%) comparison of humans and selected models on VISUALPUZZLES. We report the best performance resulting from direct multiple-choice prompting and CoT prompting for each method. We highlighted all the reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.541, + 0.825, + 0.584 + ], + "angle": 0, + "content": "knowledge. Many current benchmarks blur this line, making it difficult to assess general reasoning in non-expert settings. VISUALPUZZLES was designed to target visual reasoning skills while deliberately minimizing reliance on specialized knowledge." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.59, + 0.825, + 0.703 + ], + "angle": 0, + "content": "To test whether VISUALPUZZLES achieves this goal, we prompted GPT-4o to generate \"knowledge concept checklists\" for 50 randomly selected questions from a widely-used knowledge-intensive reasoning dataset MMMU and 50 from VISUALPUZZLES. We manually verified each question as discussed in subsection E.3. Each checklist comprises knowledge-specific questions intended to assess whether a model possesses the background information required to solve the original problem. For example, if a question depends on understanding two distinct physics laws, its checklist would include a question to explain each. The number of checklist items per instance serves as a proxy for knowledge intensity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.708, + 0.532, + 0.82 + ], + "angle": 0, + "content": "We found that MMMU problems resulted in significantly more checklist items on average (3.9) compared to VISUALPUZZLES (1.1), as shown in Table 3. This supports the hypothesis that VISUALPUZZLES is substantially less reliant on domain knowledge. As a result, performance on VISUALPUZZLES more directly reflects a model's ability to reason over visual and textual content, offering" + }, + { + "type": "table", + "bbox": [ + 0.545, + 0.707, + 0.822, + 0.763 + ], + "angle": 0, + "content": "
Benchmark# Knowledge Qs.
MMMU3.9
VISUALPUZZLES1.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.54, + 0.767, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Table 3: AVG. number of knowledge concept questions generated per instance on MMMU vs. VISUALPUZZLES." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.82, + 0.825, + 0.849 + ], + "angle": 0, + "content": "a clearer signal of progress in multimodal reasoning. Full prompt examples and further discussion are provided in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.855, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Do models already possess the knowledge required to solve VISUALPUZZLES? To explore this, we measured models' knowledge accuracy—their ability to answer the knowledge checklist questions correctly—on both benchmarks. This metric reflects how much of the required knowledge is already known by the model, independent of reasoning. We found a stark contrast: while many models exceed \\(90\\%\\) knowledge accuracy on VISUALPUZZLES," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.105, + 0.518, + 0.264 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.104, + 0.822, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.274, + 0.515, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.275, + 0.818, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.44, + 0.825, + 0.52 + ], + "angle": 0, + "content": "Figure 3: Scatter plots with trend lines of the relationship between accuracy and model size (top) and the relationship between reasoning and knowledge accuracy (bottom) on MMMU and VISUALPUZZLES. The dots' sizes represent relative model sizes. The correlation between reasoning accuracy and knowledge accuracy is higher on MMMU (0.8) than on VISUALPUZZLES (0.4)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.543, + 0.827, + 0.587 + ], + "angle": 0, + "content": "most score below \\(60\\%\\) on MMMU, with smaller models frequently dropping under \\(50\\%\\). Only the largest models approach \\(80\\%\\) accuracy on MMMU, underscoring its heavier reliance on domain-specific knowledge." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.592, + 0.826, + 0.622 + ], + "angle": 0, + "content": "Does scaling up model size improve performance? We also plot reasoning accuracy (i.e., overall performance on the benchmark) in Figure 3, revealing some interesting trends:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.631, + 0.825, + 0.688 + ], + "angle": 0, + "content": "- MMMU. Larger models tend to have higher knowledge accuracy, and this often translates into higher overall benchmark performance. This aligns with MMMU's reliance on domain-specific understanding; models with more parameters and training data are better at recalling relevant factual knowledge, thus improving their overall performance." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.691, + 0.825, + 0.775 + ], + "angle": 0, + "content": "- VISUALPUZZLES. Although many models achieve near- \\(100\\%\\) knowledge accuracy on VISUALPUZZLES, we observe no clear increase in both knowledge and reasoning accuracy as model size grows. 
In contrast to MMMU, simply scaling the number of parameters does not guarantee better performance on VISUALPUZZLES, implying that further gains on VISUALPUZZLES must stem from improvements in models' reasoning abilities rather than reliance on extensive knowledge." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.631, + 0.825, + 0.775 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.827, + 0.926 + ], + "angle": 0, + "content": "What is the relationship between knowledge and reasoning? Figure 3 shows two scatter plots with trend lines that measure how knowledge accuracy correlates with reasoning accuracy across different open models, where the relative sizes of the dots represent the sizes of the models. On MMMU (left), there is a strong positive correlation (0.8), suggesting that possessing more knowledge correlates strongly with better reasoning performance. In contrast, VISUALPUZZLES (right) exhibits a more modest correlation (0.4). Although there is still an upward trend, gains in knowledge accuracy lead to smaller improvements in reasoning accuracy. This discrepancy implies that while overcoming knowledge gaps is central to reasoning success on MMMU, VISUALPUZZLES tasks demand more nuanced inference steps that depend less on domain knowledge." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.103, + 0.827, + 0.19 + ], + "angle": 0, + "content": "Overall, these findings reinforce that VISUALPUZZLES's comparatively lower knowledge requirements are readily met by both proprietary and open models. By contrast, MMMU poses a greater challenge to smaller models in terms of knowledge, for which scaling in size clearly benefits knowledge-intensive tasks. However, on VISUALPUZZLES, larger model size alone is not a decisive factor, which might imply that genuine multimodal reasoning depends on more than just the number of parameters or pre-trained knowledge." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.206, + 0.533, + 0.221 + ], + "angle": 0, + "content": "4.2 Reasoning Complexity of VISUALPUZZLES" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.231, + 0.825, + 0.26 + ], + "angle": 0, + "content": "Do questions in VISUALPUZZLES require more complex reasoning than those in existing benchmarks like MMMU?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.267, + 0.531, + 0.351 + ], + "angle": 0, + "content": "Besides observing that models generally achieve lower accuracy on VISUALPUZZLES compared to MMMU, we further investigated whether this gap stems from increased reasoning complexity. To do so, we measured the proportion of reasoning steps required to solve each question. We began" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.351, + 0.827, + 0.463 + ], + "angle": 0, + "content": "by gathering detailed, step-by-step solutions from the models for each question, which were manually verified for completeness. Then we classified whether each step is a logical reasoning step with the help of an LLM. We show the result in Table 4. On average, the share of logical reasoning steps in solving VISUALPUZZLES questions is \\(14.8\\%\\) higher in relative terms than for MMMU (82.1% vs. 71.5%). This analysis is based on GPT-4o and Gemini-2.0-Flash across 200 randomly sampled questions per benchmark.
These results suggest that VISUALPUZZLES demands more extensive reasoning, aligning with its goal of evaluating deeper multimodal reasoning beyond factual recall. A prompt example is shown in Appendix F." + }, + { + "type": "table", + "bbox": [ + 0.545, + 0.265, + 0.822, + 0.309 + ], + "angle": 0, + "content": "
ModelMMMUVISUALPUZZLES
GPT-4o75.1%87.0%
Gemini-2.0-Flash67.9%77.3%
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.54, + 0.313, + 0.825, + 0.343 + ], + "angle": 0, + "content": "Table 4: Percentage of logical reasoning steps in solving benchmark questions." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.478, + 0.658, + 0.494 + ], + "angle": 0, + "content": "4.3 Do Reasoning Models Perform Better than Their Baselines?" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.513, + 0.391, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.513, + 0.607, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.513, + 0.822, + 0.623 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.627, + 0.828, + 0.725 + ], + "angle": 0, + "content": "Figure 4: Comparison of accuracy and average number of total completion tokens of reasoning models and their general counterparts on VISUALPUZZLES. We didn't include Gemini-2.0-Flash models here because Gemini-2.0-Flash-Thinking does not reveal the number of reasoning tokens of responses. The accuracies of Gemini-2.0-Flash and Gemini-2.0-Flash-Thinking is \\(45.0\\%\\) and \\(42.2\\%\\) respectively. Despite much higher number of completion tokens, reasoning models do not often achieve better performance on VISUALPUZZLES." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.741, + 0.828, + 0.854 + ], + "angle": 0, + "content": "Recent reasoning models often scale up inference compute by generating longer chains of thought (CoTs) to enhance reasoning ability. To assess the effectiveness of this strategy on VISUALPUZZLES, we compare several reasoning models with their non-reasoning counterparts in Figure 4. The reasoning model o1 outperforms GPT-4o overall. However, structured \"thinking\" modes, despite much higher number of completion tokens, show no consistent benefit. Similarity of output further reveals that the thinking mode primarily increases vocabulary without meaningfully altering the underlying reasoning process, as illustrated in Figure 13." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.87, + 0.826, + 0.884 + ], + "angle": 0, + "content": "4.4 Are Branching and Revalidation Reasoning Patterns Effective on VISUALPUZZLES?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "As discussed in Section 4.3, reasoning-enabled models do not consistently outperform their non-reasoning counterparts on VISUALPUZZLES. To better understand this discrepancy, we" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.102, + 0.421, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.421, + 0.102, + 0.667, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.104, + 0.824, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.204, + 0.827, + 0.262 + ], + "angle": 0, + "content": "Figure 5: Comparison of Reasoning Pattern of Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Left figure compares the accuracy of Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Middle figure shows frequency of each pattern. 
Right figure shows correlation of the patterns with accuracy on the benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.28, + 0.827, + 0.327 + ], + "angle": 0, + "content": "examine Claude-3.7-Sonnet-Thinking's reasoning behaviors present in long CoTs, specifically, branching and re-validation, which are known to play important roles in enhancing reasoning performance4." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.332, + 0.825, + 0.404 + ], + "angle": 0, + "content": "As shown in Figure 5, our analysis reveals a striking contrast between benchmarks. On MMMU, both branching and re-validation correlate positively with model accuracy. These strategies help models explore alternative reasoning paths and revisit earlier steps, aiding in the retrieval of relevant factual knowledge, an essential component for solving MMMU's knowledge-intensive questions. An illustrative example is provided in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.408, + 0.487, + 0.549 + ], + "angle": 0, + "content": "Surprisingly, on VISUALPUZZLES, these reasoning behaviors are more frequent, yet less predictive of success. Despite their increased presence in long-form responses, we observe no significant correlation between these strategies and task accuracy. This suggests that models may be using branching and re-validation in ways that do not meaningfully contribute to solving the problem." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.555, + 0.487, + 0.682 + ], + "angle": 0, + "content": "Figure 6 highlights this with an example from Claude-3.7-Sonnet-Thinking, where the model applies branching on a VISUALPUZZLES puzzle. However, the additional reasoning paths remain shallow and fail to engage with the core challenge—understanding the spatial arrangement of chairs in the image. The full response is included in Appendix E." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.701, + 0.286, + 0.719 + ], + "angle": 0, + "content": "5 Analysis" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.408, + 0.816, + 0.448 + ], + "angle": 0, + "content": "Question: Given a pair of chairs arranged at a specific angle, select the option that matches the angle of the chairs in the question. (Answer: C)" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.449, + 0.82, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.52, + 0.819, + 0.668 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet (With Thinking) \nLet me examine the chairs in the question: \n- The chairs are positioned 180 degrees apart. \n- They are shown in profile view. \nOption A: ...; Option B: ...; Option C: ...; Option D: ... Any of them is correct from an angular perspective.. \nHowever, looking more carefully at the exact orientation: \n- The original chairs are shown in perfect profile view \n- Option A shows chairs in perfect profile view, .... .. \nI'll go with Option A as it presents the cleanest profile view that matches the arrangement in the question." + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.673, + 0.828, + 0.725 + ], + "angle": 0, + "content": "Figure 6: An example of Claude-3.7-Sonnet-Thinking utilizing branching to solve a VISUALPUZZLES puzzle." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.733, + 0.681, + 0.75 + ], + "angle": 0, + "content": "5.1 Do Models Approach VISUALPUZZLES Questions Differently?" 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.759, + 0.439, + 0.843 + ], + "angle": 0, + "content": "Table 5 shows the statistics of Claude-3.7-Sonnet-Thinking's answering strategy. We observe a clear divergence in answering strategies between MMMU and VISUALPUZZLES. On MMMU, the" + }, + { + "type": "table", + "bbox": [ + 0.453, + 0.757, + 0.822, + 0.818 + ], + "angle": 0, + "content": "
BenchmarkAnswer-FirstOption-First
MMMU29.3%70.7%
VISUALPUZZLES (Image Options)72.5%27.5%
VISUALPUZZLES (Text Options)98.3%1.7%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.532, + 0.819, + 0.741, + 0.835 + ], + "angle": 0, + "content": "Table 5: Answering Strategy" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.843, + 0.825, + 0.901 + ], + "angle": 0, + "content": "model tend to follow an option-driven approach—using the provided choices early to eliminate unlikely answers and select the most relevant one, often without explicitly solving the problem. In contrast, models more frequently adopt an answer-first strategy on VISUALPUZZLES, attempting to solve the question independently before comparing" + }, + { + "type": "page_footnote", + "bbox": [ + 0.19, + 0.91, + 0.741, + 0.926 + ], + "angle": 0, + "content": "4We examined Claude-3.7-Sonnet-Thinking as it explicitly provides thinking output." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.345, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.825, + 0.149 + ], + "angle": 0, + "content": "the result to the answer choices. This pattern holds across both textual and image-based options, though the option-first approach appears slightly more often (around \\(30\\%\\)) for image-based tasks—likely due to the added complexity of visual comparison." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.17, + 0.694, + 0.187 + ], + "angle": 0, + "content": "5.2 Does model performance transfer between reasoning categories?" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.197, + 0.451, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.398, + 0.471, + 0.441 + ], + "angle": 0, + "content": "Figure 7: Correlation Heatmap among reasoning categories for models (averaged across all models we evaluated)." + }, + { + "type": "text", + "bbox": [ + 0.478, + 0.198, + 0.828, + 0.449 + ], + "angle": 0, + "content": "Figure 7 presents a correlation heatmap illustrating the relationships among the five reasoning categories in VISUALPUZZLES. We report model correlations averaged across all models in Table 2. For humans, each reasoning category likely engages different cognitive or mental processes (Goel & Dolan, 2004; Green et al., 2010; Bright & Feeney, 2014; Babcock & Vallesi, 2015), so performance in one category might not transfer to performance in another. However, the correlation heatmap of the models tells a different story. We observe notably strong correlations across reasoning categories, with values ranging from 0.11 to as high as 0.94. In particular, algorithmic and deductive reasoning show high correlation (0.94), and other pairs such as algorithmic-analogical and deductive-analogical also exhibit strong associations. This suggests" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.449, + 0.825, + 0.52 + ], + "angle": 0, + "content": "that model performance tends to generalize across categories. However, this generalization may not reflect true reasoning abilities. Instead, the high correlations could indicate that models are leveraging shared surface-level patterns or shortcut strategies that happen to work across multiple structurally different categories, unlike humans, who may rely on distinct cognitive processes." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.541, + 0.321, + 0.557 + ], + "angle": 0, + "content": "5.3 Error Analysis" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.569, + 0.584, + 0.793 + ], + "angle": 0, + "content": "Figure 8 shows a pie chart illustrating the distribution of error categories of 100 instances generated by Claude-3.7-Sonnet-Thinking on VISUALPUZZLES, revealing that reasoning errors dominate at \\(56\\%\\), reinforcing the fact that reasoning is the greatest challenge to models in VISUALPUZZLES. Perceptual errors \\((21\\%)\\) and spatial / orientation errors \\((17\\%)\\) also constitute substantial portions of failures, reflecting difficulties in interpreting visual elements and understanding spatial relationships. These three categories together account for \\(94\\%\\) of mistakes, emphasizing a need for multimodal models with stronger reasoning capabilities as well as more robust perception and spatial understanding. Textual and visual understanding errors \\((4\\%)\\) and reject-to-answer cases \\((2\\%)\\) are relatively rare. Appendix I shows samples of error and correct cases for each reasoning and difficulty category." + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.538, + 0.825, + 0.753 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.592, + 0.755, + 0.825, + 0.786 + ], + "angle": 0, + "content": "Figure 8: Error Distribution of Claude-3.7-Sonnet-Thinking" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.819, + 0.329, + 0.835 + ], + "angle": 0, + "content": "6 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.854, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Multimodal Language Models (MLLMs), particularly vision-language models, have experienced significant improvements recently. Large-scale vision-language models (Gemini et al., 2023; OpenAI, 2024; Anthropic, 2022), including open-weight ones (Li et al., 2024; Yue et al., 2025; Liu et al., 2024b; Tong et al., 2024; Dubey et al., 2024), are capable of utilizing both image and text inputs to solve challenging questions." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.176 + ], + "angle": 0, + "content": "Multimodal reasoning models, models that specialize in complex reasoning, further push the boundary of MLLMs' capabilities. Large-scale multimodal reasoning models such as QVQ (Qwen Team, 2024), Claude-3.7-Sonnet-thinking (Anthropic, 2022), o1 (Jaech et al., 2024), and Gemini-2.0-flash-thinking (Gemini et al., 2023) excel in reasoning-heavy tasks such as coding and solving math problems." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.181, + 0.828, + 0.254 + ], + "angle": 0, + "content": "Multimodal Reasoning Benchmarks. A number of multimodal benchmarks test both models' world knowledge and reasoning abilities. These benchmarks (Yue et al., 2024a; Marino et al., 2019; Liu et al., 2023b; Yue et al., 2024b; Authors, 2025) emphasize the multimodal ability of models as a whole, without further separation of knowledge and reasoning."
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.258, + 0.829, + 0.33 + ], + "angle": 0, + "content": "Recently, more multimodal benchmarks have placed emphasis on multimodal logical reasoning abilities. Many of them (Lu et al., 2023; Wang et al., 2024b) focus primarily on mathematical problems, testing both mathematical knowledge and reasoning. Some others cover more general logical reasoning problems (Cherian et al., 2022b; Gao et al., 2023), testing both models' knowledge and reasoning in different domains." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.355, + 0.466, + 0.372 + ], + "angle": 0, + "content": "7 Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.39, + 0.829, + 0.504 + ], + "angle": 0, + "content": "We presented VISUALPUZZLES, a novel multimodal benchmark carefully designed to minimize the impact of domain-specific knowledge and isolate models' core reasoning capabilities. Our results show that while proprietary and large-scale open models achieve relatively higher performance, they still fall short of human-level reasoning—especially on more complex tasks such as analogical and inductive reasoning. Moreover, we observe that strong performance on knowledge-intensive benchmarks like MathVista and MMMU does not necessarily translate into high accuracy on VISUALPUZZLES, underscoring the distinct challenge of knowledge-light reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.509, + 0.829, + 0.664 + ], + "angle": 0, + "content": "These findings suggest that purely scaling model size and knowledge resources may not suffice for robust multimodal reasoning skills; rather, methods that promote structured reasoning, such as explicit thinking modes or recursive reasoning steps, can offer substantial improvements, particularly for hard questions. Future research can explore new training strategies, specialized architectures, or model interpretations tailored to reduce reliance on memorized facts and enhance logical inference. Extending VISUALPUZZLES to include additional types of multi-image reasoning or temporally dynamic visual information may further stress-test models' core inference abilities. By disentangling domain knowledge from multimodal reasoning, we hope VISUALPUZZLES will serve as a valuable tool for developing and evaluating next-generation MLLMs that excel at genuinely understanding and reasoning about the world without depending heavily on specialized factual knowledge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.689, + 0.312, + 0.705 + ], + "angle": 0, + "content": "8 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.725, + 0.826, + 0.798 + ], + "angle": 0, + "content": "Disentangling Knowledge Despite our best efforts to isolate domain-specific knowledge from the evaluation of multimodal reasoning, VISUALPUZZLES is still not entirely free of knowledge dependencies. Basic familiarity with everyday objects or common scenarios is still required; completely knowledge-free evaluation remains an ideal rather than a practical reality." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.817, + 0.826, + 0.86 + ], + "angle": 0, + "content": "Real World Application VISUALPUZZLES emphasizes puzzle-like questions that may not reflect the full diversity of real-world scenarios, limiting generalizability to more specialized domains."
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Question Format VISUALPUZZLES focuses on multiple-choice questions, which may not capture the breadth of open-ended reasoning tasks where models must generate complex textual or visual outputs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.345, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.149 + ], + "angle": 0, + "content": "Future work can address these limitations by including more varied question formats, broader domains, and more granular analyses of a model's knowledge versus its multimodal reasoning abilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.168, + 0.364, + 0.185 + ], + "angle": 0, + "content": "9 Ethical Statement" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.201, + 0.825, + 0.273 + ], + "angle": 0, + "content": "This paper uses samples extracted from existing quiz sources for scholarly analysis and testing purposes, in accordance with US fair use law and standard practice. These data are neither intended for, nor capable of, substituting for the original works; thus, we believe their inclusion does not diminish the market value or utility of the source materials. A complete list of references for the data sources is attached in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.292, + 0.356, + 0.31 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.325, + 0.825, + 0.381 + ], + "angle": 0, + "content": "This project was supported in part by a grant from DSTA Singapore and the Carnegie Bosch Institute. The authors would like to thank CMU NeuLab colleagues for their constructive comments. The authors would also like to thank all volunteers who participated in the human evaluation." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.403, + 0.275, + 0.418 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.428, + 0.826, + 0.457 + ], + "angle": 0, + "content": "Anthropic. Claude, 2022. URL https://www.anthropic.com/index/introducing-claude." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.468, + 0.826, + 0.497 + ], + "angle": 0, + "content": "Humanity's Last Exam's Authors. Humanity's last exam. ArXiv, abs/2501.14249, 2025. URL https://api.semanticscholar.org/CorpusID:275906652." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.507, + 0.825, + 0.537 + ], + "angle": 0, + "content": "Laura Babcock and Antonino Vallesi. The interaction of process and domain in prefrontal cortex during inductive reasoning. Neuropsychologia, 67:91-99, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.547, + 0.827, + 0.604 + ], + "angle": 0, + "content": "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond, 2024. URL https://openreview.net/forum?id=qrGjFJV13m." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.615, + 0.825, + 0.658 + ], + "angle": 0, + "content": "Yonatan Bitton, Ron Yosef, Eliyahu Strugo, Dafna Shahaf, Roy Schwartz, and Gabriel Stanovsky.
Vasr: Visual analogies of situation recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 241-249, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.668, + 0.825, + 0.698 + ], + "angle": 0, + "content": "Aimée K Bright and Aidan Feeney. Causal knowledge and the development of inductive reasoning. Journal of Experimental Child Psychology, 122:48-61, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.708, + 0.827, + 0.749 + ], + "angle": 0, + "content": "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin Smith, and Joshua B Tenenbaum. Are deep neural networks smarter than second graders? arXiv preprint arXiv:2212.09993, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.761, + 0.827, + 0.817 + ], + "angle": 0, + "content": "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin A. Smith, and Joshua B. Tenenbaum. Are deep neural networks smarter than second graders? 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10834-10844, 2022b. URL https://api-semanticscholar.org/CorpusID:254877678." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.829, + 0.825, + 0.858 + ], + "angle": 0, + "content": "DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.868, + 0.827, + 0.923 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. ArXiv preprint, abs/2407.21783, 2024. URL https://arxiv.org/abs/2407.21783." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.428, + 0.827, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.148 + ], + "angle": 0, + "content": "Jingying Gao, Qi Wu, Alan Blair, and Maurice Pagnucco. Lora: A logical reasoning augmented dataset for visual question answering. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.154, + 0.83, + 0.211 + ], + "angle": 0, + "content": "Gemini, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. ArXiv preprint, abs/2312.11805, 2023. URL https://arxiv.org/abs/2312.11805." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.219, + 0.826, + 0.251 + ], + "angle": 0, + "content": "Vinod Goel and Raymond J Dolan. Differential involvement of left prefrontal cortex in inductive and deductive reasoning. Cognition, 93(3):B109-B121, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.256, + 0.826, + 0.301 + ], + "angle": 0, + "content": "Adam E Green, David JM Kraemer, Jonathan A Fugelsang, Jeremy R Gray, and Kevin N Dunbar. Connecting long distance: semantic distance in analogical reasoning modulates frontopolar cortex activity. Cerebral cortex, 20(1):70-76, 2010." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.307, + 0.829, + 0.351 + ], + "angle": 0, + "content": "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.358, + 0.826, + 0.415 + ], + "angle": 0, + "content": "Bo Li*, Peiyuan Zhang*, Kaicheng Zhang*, Fanyi Pu*, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimoal models, March 2024. URL https://github.com/EvolvingLMMs-Lab/lmms-eval." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.423, + 0.829, + 0.468 + ], + "angle": 0, + "content": "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.474, + 0.826, + 0.504 + ], + "angle": 0, + "content": "Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning, 2023a. URL https://arxiv.org/abs/2310.03744." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.511, + 0.829, + 0.554 + ], + "angle": 0, + "content": "Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024a. URL https://arxiv.org/pdf/2401.13601." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.562, + 0.826, + 0.593 + ], + "angle": 0, + "content": "Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.599, + 0.829, + 0.656 + ], + "angle": 0, + "content": "Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding. ArXiv, abs/2410.13824, 2024b. URL https://api(semanticscholar.org/ CorpusID:273403951." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.664, + 0.829, + 0.721 + ], + "angle": 0, + "content": "Yuanzhan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, Kai Chen, and Dahua Lin. Mmbench: Is your multi-modal model an all-around player? In European Conference on Computer Vision, 2023b. URL https://api_semanticscholar.org/CorpusID:259837088." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.728, + 0.829, + 0.785 + ], + "angle": 0, + "content": "Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.793, + 0.829, + 0.851 + ], + "angle": 0, + "content": "Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3190-3199, 2019. URL https://api_semanticscholar.org/CorpusID:173991173." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.858, + 0.829, + 0.888 + ], + "angle": 0, + "content": "OpenAI. Hello gpt4-o. https://openai.com/index/hello-gpt-4o/, 2024. URL https://openai.com/index/hello-gpt-4o/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Qwen Team. Qvq: To see the world with wisdom, December 2024. URL https://qwenlm.github.io/blog/qvq-72b-preview/." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.83, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.345, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.133 + ], + "angle": 0, + "content": "Qwen Team. Qwen2.5-vl, January 2025a. URL https://qwenlm.github.io/blog/qwen2.5-v1/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.172 + ], + "angle": 0, + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025b. URL https://qwenlm.github.io/blog/qwq-32b/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.179, + 0.829, + 0.236 + ], + "angle": 0, + "content": "Jonathan Roberts, Kai Han, Neil Houlsby, and Samuel Albanie. SciFIBench: Benchmarking large multimodal models for scientific figure interpretation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=HcLFNuQwy5." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.244, + 0.829, + 0.302 + ], + "angle": 0, + "content": "Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. ArXiv preprint, abs/2406.16860, 2024. URL https://arxiv.org/abs/2406.16860." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.309, + 0.829, + 0.354 + ], + "angle": 0, + "content": "Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.361, + 0.829, + 0.405 + ], + "angle": 0, + "content": "Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset, 2024b. URL https:// arxiv.org/abs/2402.14804." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.413, + 0.829, + 0.457 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. ArXiv preprint, abs/2407.10671, 2024. URL https://arxiv.org/abs/2407.10671." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.464, + 0.829, + 0.537 + ], + "angle": 0, + "content": "Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Renliang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. 
Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.544, + 0.829, + 0.588 + ], + "angle": 0, + "content": "Xiang Yue, Tianyu Zheng, Yuansheng Ni, Yubo Wang, Kai Zhang, Shengbang Tong, Yuxuan Sun, Botao Yu, Ge Zhang, Huan Sun, et al. Mmmu-pro: A more robust multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2409.02813, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.829, + 0.667 + ], + "angle": 0, + "content": "Xiang Yue, Yueqi Song, Akari Asai, Simran Khanuja, Anjali Kantharuban, Seungone Kim, Jean de Dieu Nyandwi, Lintang Sutawika, Sathyanarayanan Ramamoorthy, and Graham Neubig. Pangea: A fully open multilingual multimodal LLM for 39 languages. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=a3g214yEys." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.667 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.507, + 0.123 + ], + "angle": 0, + "content": "Table of Contents in Appendix" + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.142, + 0.826, + 0.157 + ], + "angle": 0, + "content": "A VISUALPUZZLES Statistics 16" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.163, + 0.826, + 0.178 + ], + "angle": 0, + "content": "A.1 Breakdown of Statistics of VISUALPUZZLES 16" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.185, + 0.826, + 0.2 + ], + "angle": 0, + "content": "A.2 Data Sources 16" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.163, + 0.826, + 0.2 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.219, + 0.826, + 0.235 + ], + "angle": 0, + "content": "B Model Evaluation Setup 16" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.253, + 0.826, + 0.269 + ], + "angle": 0, + "content": "C Human Annotation Setup 16" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.274, + 0.826, + 0.29 + ], + "angle": 0, + "content": "C.1 Difficulty Labeling 16" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.296, + 0.826, + 0.312 + ], + "angle": 0, + "content": "C.2 Reasoning Category Labeling 17" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.274, + 0.826, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.329, + 0.826, + 0.344 + ], + "angle": 0, + "content": "D Full Results 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.35, + 0.826, + 0.365 + ], + "angle": 0, + "content": "D.1 Full Results w/ CoT 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.372, + 0.826, + 0.387 + ], + "angle": 0, + "content": "D.2 Full Results w/n CoT 17" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.35, + 0.826, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.406, + 0.826, + 0.422 + ], + "angle": 0, + "content": "E Knowledge Checklist 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.427, + 0.826, + 0.443 + ], + "angle": 0, + "content": "E.1 Knowledge Checklist Generation 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.449, + 0.826, + 0.464 + ], + "angle": 0, + "content": 
"E.2 Example Knowledge Checklist Question 20" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.47, + 0.826, + 0.486 + ], + "angle": 0, + "content": "E.3 Knowledge Checklist Human Annotation 20" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.427, + 0.826, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.504, + 0.826, + 0.52 + ], + "angle": 0, + "content": "F Reasoning Complexity 20" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.538, + 0.826, + 0.554 + ], + "angle": 0, + "content": "G Comparison with Other Benchmarks 20" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.572, + 0.826, + 0.588 + ], + "angle": 0, + "content": "H Additional Analysis 21" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.594, + 0.826, + 0.61 + ], + "angle": 0, + "content": "H.1 Proprietary V.S. Open Models 21" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.616, + 0.826, + 0.631 + ], + "angle": 0, + "content": "H.2 Reasoning Category and Difficulty Levels 21" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.637, + 0.826, + 0.653 + ], + "angle": 0, + "content": "H.3 Option Types and Difficulty Levels 24" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.658, + 0.826, + 0.674 + ], + "angle": 0, + "content": "H.4 Case Study of Reasoning 25" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.679, + 0.826, + 0.695 + ], + "angle": 0, + "content": "H.5 Impact of CoT 25" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.594, + 0.826, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.713, + 0.826, + 0.729 + ], + "angle": 0, + "content": "I Case Study 27" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.45, + 0.119 + ], + "angle": 0, + "content": "A VISUALPUZZLES Statistics" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.134, + 0.546, + 0.149 + ], + "angle": 0, + "content": "A.1 Breakdown of Statistics of VISUALPUZZLES" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.16, + 0.681, + 0.177 + ], + "angle": 0, + "content": "Table 6 shows a breakdown of statistics of VISUALPUZZLES questions." + }, + { + "type": "table", + "bbox": [ + 0.194, + 0.187, + 0.805, + 0.328 + ], + "angle": 0, + "content": "
Reasoning CategoryImage OptionsText OptionsTotal
EasyMediumHardEasyMediumHard
Algorithmic21801241009262
Analogical1208110000211
Deductive29242457921200
Inductive770127320209
Spatial12341661523286
Total300224145233233331168
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.337, + 0.825, + 0.355 + ], + "angle": 0, + "content": "Table 6: Number of questions in each reasoning category, option types, and difficulty levels." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.384, + 0.317, + 0.399 + ], + "angle": 0, + "content": "A.2 Data Sources" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.405, + 0.825, + 0.435 + ], + "angle": 0, + "content": "- Chinese Civil Service Examination (中国国家公务员考试) 5 (224 puzzles): we manually translated questions from this exam to English from Chinese." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.438, + 0.825, + 0.466 + ], + "angle": 0, + "content": "Textbooks (210 puzzles): we carefully collected and re-purposed questions from online resources and textbooks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.47, + 0.825, + 0.5 + ], + "angle": 0, + "content": "- Smart-101 (Cherian et al., 2022a) (247 puzzles): we carefully selected images from this benchmark and synthesized new questions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.502, + 0.826, + 0.533 + ], + "angle": 0, + "content": "- MATH-Vision (Wang et al., 2024a) (293 puzzles): we carefully selected and repurposed questions from this benchmark." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.535, + 0.825, + 0.563 + ], + "angle": 0, + "content": "VASR (Bitton et al., 2023) (194 puzzles): we carefully selected questions from this benchmark." + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.405, + 0.826, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.584, + 0.427, + 0.603 + ], + "angle": 0, + "content": "B Model Evaluation Setup" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.619, + 0.821, + 0.734 + ], + "angle": 0, + "content": "
Model Evaluation Prompt with Chain-of-Thought
Solve the multiple-choice question and then answer with the option letter from the given choices. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of options. Think step by step before answering.
Model Evaluation Prompt w/n Chain-of-Thought
Answer the question with the option's letter from the given choices directly.
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.755, + 0.444, + 0.775 + ], + "angle": 0, + "content": "C Human Annotation Setup" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.787, + 0.361, + 0.804 + ], + "angle": 0, + "content": "C.1 Difficulty Labeling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.844 + ], + "angle": 0, + "content": "Each question was also carefully assigned a difficulty label from easy, medium, or hard, based on the cognitive load required for reasoning." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.854, + 0.803, + 0.87 + ], + "angle": 0, + "content": "- Easy Level questions could be solved by the annotator in less than one minute." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.872, + 0.826, + 0.889 + ], + "angle": 0, + "content": "- Medium Level questions could be solved by the annotator in one to three minutes." + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.854, + 0.826, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.172, + 0.897, + 0.778, + 0.924 + ], + "angle": 0, + "content": "5https://en.wikipedia.org/wiki/Civil服务体系_of_the_People%27s_Republic_of_China#Examinations." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.345, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "- Hard Level questions require the annotator more than five minutes to solve or quit solving." + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.173, + 0.491, + 0.189 + ], + "angle": 0, + "content": "Annotation Guideline for Puzzle Difficulty" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.195, + 0.819, + 0.221 + ], + "angle": 0, + "content": "Try to solve the puzzle first. You need to measure the time you attempted to solve each puzzle. Then, select from Easy, Medium, or Hard based on the time required." + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.221, + 0.818, + 0.246 + ], + "angle": 0, + "content": "- Easy Level: You can solve or answer the question within 1 minute. This level of puzzles should require minimal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.246, + 0.818, + 0.271 + ], + "angle": 0, + "content": "- Medium Level: You can solve or answer the question within 1-3 minutes. This level of puzzles should demand moderate reasoning." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.271, + 0.818, + 0.297 + ], + "angle": 0, + "content": "- Hard Level: You can / cannot solve this question with more than 5 minutes. This level of puzzles should involve significant / multi-step reasoning." 
+ }, + { + "type": "list", + "bbox": [ + 0.177, + 0.221, + 0.818, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.333, + 0.438, + 0.35 + ], + "angle": 0, + "content": "C.2 Reasoning Category Labeling" + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.372, + 0.569, + 0.388 + ], + "angle": 0, + "content": "Annotation Guideline for Puzzle Reasoning Category" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.394, + 0.818, + 0.419 + ], + "angle": 0, + "content": "Assign the category that best describes the primary type of reasoning or logic required for each puzzle:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.42, + 0.818, + 0.444 + ], + "angle": 0, + "content": "- Algorithmic Reasoning: Involves following or devising a step-by-step procedure or rule-based process." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.445, + 0.818, + 0.458 + ], + "angle": 0, + "content": "- Analogical Reasoning: Requires identifying relationships by comparison between pairs of entities." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.458, + 0.792, + 0.47 + ], + "angle": 0, + "content": "- Deductive Reasoning: Involves deriving specific conclusions from general or given premises." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.47, + 0.749, + 0.483 + ], + "angle": 0, + "content": "- Inductive Reasoning: Focuses on generalizing a rule or pattern from specific instances." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.483, + 0.786, + 0.496 + ], + "angle": 0, + "content": "- Spatial Reasoning: Involves visualizing and manipulating shapes, distances, or orientations." + }, + { + "type": "list", + "bbox": [ + 0.177, + 0.394, + 0.818, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.536, + 0.32, + 0.551 + ], + "angle": 0, + "content": "D Full Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.577, + 0.367, + 0.592 + ], + "angle": 0, + "content": "D.1 Full Results w/ CoT" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.376, + 0.624 + ], + "angle": 0, + "content": "D.2 Full Results w/n CoT" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.644, + 0.402, + 0.663 + ], + "angle": 0, + "content": "E Knowledge Checklist" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.686, + 0.465, + 0.702 + ], + "angle": 0, + "content": "E.1 Knowledge Checklist Generation" + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.725, + 0.56, + 0.74 + ], + "angle": 0, + "content": "Prompt to Generate Knowledge Checklist Questions" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.746, + 0.818, + 0.787 + ], + "angle": 0, + "content": "You are an exam writer. You are now writing a knowledge test. You are given a question (Question) regarding an image and its standard solution (Solution), your task is to write free response questions that test on individual knowledge required in answering the question correctly." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.796, + 0.513, + 0.81 + ], + "angle": 0, + "content": "You should follow these steps to complete the task:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.811, + 0.575, + 0.823 + ], + "angle": 0, + "content": "1. explicitly analyze the given image, Question, and Solution" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.823, + 0.713, + 0.835 + ], + "angle": 0, + "content": "2. explicitly list out the individual knowledge concepts required to reach Solution." 
+ }, + { + "type": "text", + "bbox": [ + 0.18, + 0.835, + 0.818, + 0.873 + ], + "angle": 0, + "content": "3. write free response questions to test on the definition of each concept listed. Your generated questions should not include details of the given Question. Note that you need to provide answer keys to these questions too." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.873, + 0.518, + 0.886 + ], + "angle": 0, + "content": "4. format the free response questions in json format." + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.811, + 0.818, + 0.886 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.898, + 0.306, + 0.911 + ], + "angle": 0, + "content": "Question: question" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.911, + 0.293, + 0.922 + ], + "angle": 0, + "content": "Solution: answer" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.345, + 0.048 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.227, + 0.825, + 0.755 + ], + "angle": 0, + "content": "
ModelAlgorithmicAnalogicalDeductiveInductiveSpatialOverall
Random Choice25.025.025.025.025.025.0
Human (95th Percentile)100.0100.0100.081.6100.089.3
Human (50th Percentile)88.066.080.050.090.075.0
Human (5th Percentile)68.125.037.00.059.157.5
Proprietary Models
o4-mini65.368.775.533.045.557.0
o364.568.369.527.342.754.0
o163.768.367.529.234.351.8
GPT-4o49.258.349.027.326.241.3
Gemini-2.5-pro60.064.060.029.736.449.5
Gemini-2.0-flash55.358.857.024.431.845.0
Gemini-2.0-flash-thinking46.670.149.024.925.542.2
Gemini-1.5-Pro53.457.458.526.332.545.0
Claude-3.7-Sonnet64.548.365.026.837.448.3
Claude-3.7-Sonnet-thinking67.244.161.531.137.148.2
Claude-3.5-Sonnet53.447.951.525.434.342.4
Open Models
LLaVA-1.5-7B23.321.836.020.619.223.7
LLaVA-1.5-13B24.821.823.025.425.524.2
LLaVA-1.6-7B27.523.730.022.521.324.8
LLaVA-1.6-13B25.225.627.027.323.425.5
LLaVA-1.6-34B29.428.043.024.925.529.7
LLaVA-OV-0.5B21.026.130.522.525.224.8
LLaVA-OV-7B27.926.136.523.425.527.7
LLaVA-OV-72B34.726.537.027.328.730.8
Llama-3.2-11B-Vision-Instruct31.030.839.021.126.229.4
Llama-3.2-90B-Vision-Instruct45.023.243.026.331.534.1
Qwen-VL21.431.325.026.324.125.3
Qwen2-VL-72B41.628.439.522.529.032.4
QvQ-72B-Preview43.145.548.027.327.637.8
Qwen2-VL-2B-Instruct26.026.124.527.825.526.0
Qwen2-VL-7B-Instruct36.321.838.520.622.727.9
Qwen2-VL-72B-Instruct39.933.545.223.532.434.9
Qwen2.5-VL-3B-Instruct35.127.544.525.824.831.2
Qwen2.5-VL-7B-Instruct40.526.639.024.029.732.1
Qwen2.5-VL-72B-Instruct53.446.958.025.829.542.3
Cambrian-8B31.324.236.024.029.028.9
Cambrian-13B24.825.639.524.421.026.5
Pangea-7B30.528.935.024.425.228.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.765, + 0.828, + 0.793 + ], + "angle": 0, + "content": "Table 7: Performance (%) of various models with Chain of Thoughts (CoT) on VISUALPUZZLES." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.345, + 0.048 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.277, + 0.825, + 0.707 + ], + "angle": 0, + "content": "
ModelAlgorithmicAnalogicalDeductiveInductiveSpatialOverall
Random Choice25.025.025.025.025.025.0
Human (95th Percentile)100.0100.0100.081.6100.089.3
Human (50th Percentile)88.066.080.050.090.075.0
Human (5th Percentile)68.125.037.00.059.157.5
Proprietary Models
GPT-4o40.834.140.524.929.734.0
Gemini-2.0-flash57.641.758.023.035.743.2
Gemini-1.5-Pro51.246.554.024.929.440.8
Open Models
LLaVA-1.5-7B24.424.734.526.825.526.9
LLaVA-1.5-13B24.426.133.526.328.327.6
LLaVA-1.6-7B27.525.132.524.927.327.4
LLaVA-1.6-13B21.424.729.528.223.125.0
LLaVA-1.6-34B31.327.343.024.427.629.8
LLaVA-OV-0.5B24.425.637.524.925.527.2
LLaVA-OV-7B27.528.040.524.428.029.4
LLaVA-OV-72B31.723.645.021.324.628.8
Llama-3.2-11B-Vision-Instruct27.524.231.026.327.627.3
Llama-3.2-90B-Vision-Instruct38.222.344.525.833.633.1
Qwen-VL23.726.529.527.826.626.6
Qwen2-VL-72B38.928.443.020.629.032.0
QvQ-72B-Preview44.843.644.026.830.837.8
Qwen2-VL-2B-Instruct31.729.440.523.931.531.3
Qwen2-VL-7B-Instruct33.624.246.022.526.230.2
Qwen2-VL-72B-Instruct40.530.346.025.429.434.2
Qwen2.5-VL-3B-Instruct36.326.147.025.822.431.0
Qwen2.5-VL-7B-Instruct38.223.751.524.931.133.7
Qwen2.5-VL-72B-Instruct43.140.351.525.433.738.6
Cambrian-8B25.220.435.023.020.624.5
Cambrian-13B23.328.036.524.926.227.4
Pangea-7B32.423.738.528.732.531.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.717, + 0.825, + 0.744 + ], + "angle": 0, + "content": "Table 8: Performance (%) of various models with Multiple Choice Direct prompting on VISUALPUZZLES." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.345, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.517, + 0.12 + ], + "angle": 0, + "content": "E.2 Example Knowledge Checklist Question" + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.137, + 0.548, + 0.153 + ], + "angle": 0, + "content": "Example Knowledge Checklist Question (MMMU)" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.158, + 0.76, + 0.173 + ], + "angle": 0, + "content": "- Question: Explain the Arbitrage Pricing Theory (APT) model and its purpose in finance." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.173, + 0.818, + 0.224 + ], + "angle": 0, + "content": "- Answer: The Arbitrage Pricing Theory (APT) model is a financial theory that estimates the expected return on an asset based on the asset's sensitivity to various macroeconomic factors. It is used to determine the fair price of an asset by considering multiple factors that could affect its return, as opposed to relying on a single market index as in the Capital Asset Pricing Model (CAPM)." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.158, + 0.818, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.239, + 0.611, + 0.255 + ], + "angle": 0, + "content": "Example Knowledge Checklist Question (VISUALPUZZLES)" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.261, + 0.624, + 0.274 + ], + "angle": 0, + "content": "- Question: What is the definition of distance in a geometric context?" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.274, + 0.815, + 0.288 + ], + "angle": 0, + "content": "- Answer: Distance in a geometric context refers to the measurement of space between two points." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.261, + 0.815, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.312, + 0.525, + 0.327 + ], + "angle": 0, + "content": "E.3 Knowledge Checklist Human Annotation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.339, + 0.825, + 0.381 + ], + "angle": 0, + "content": "We asked two human annotators to manually verify and correct the knowledge checklist questions and gave them the following instructions. The inter-annotator agreement rate is \\(87.8\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.394, + 0.415, + 0.408 + ], + "angle": 0, + "content": "Human Annotation Instructions" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.412, + 0.662, + 0.426 + ], + "angle": 0, + "content": "You are given a json file, where each item contains the following elements:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.426, + 0.432, + 0.438 + ], + "angle": 0, + "content": "- Question: a multiple-choice question." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.438, + 0.613, + 0.451 + ], + "angle": 0, + "content": "- Answer: the answer to the question with an optional explanation." 
+ }, + { + "type": "text", + "bbox": [ + 0.178, + 0.451, + 0.817, + 0.476 + ], + "angle": 0, + "content": "- Knowledge Concept Checklist: a list of question-answer pairs, where each question in the list is intended to represent a distinct knowledge concept necessary for solving the Question." + }, + { + "type": "list", + "bbox": [ + 0.178, + 0.426, + 0.817, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.487, + 0.818, + 0.514 + ], + "angle": 0, + "content": "Your task is to annotate the knowledge concept checklists generated by a model. You should carefully evaluate each question-answer pair based on the following criteria:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.514, + 0.818, + 0.538 + ], + "angle": 0, + "content": "1. Necessity: Is the question genuinely necessary for solving the problem? If not, then remove the question." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.538, + 0.819, + 0.564 + ], + "angle": 0, + "content": "2. Repetition: Check if any questions are repetitive or duplicate existing questions within the list. If the question is repetitive or a duplicate, then remove the question." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.564, + 0.818, + 0.589 + ], + "angle": 0, + "content": "3. Completeness: Ensure no critical knowledge concepts required to solve the problem are missing, and identify if any additional important questions should have been included." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.589, + 0.818, + 0.614 + ], + "angle": 0, + "content": "4. Correctness: Verify whether the provided answers are accurate. Revise the checklist in case of incorrect checklist QA pairs." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.614, + 0.818, + 0.652 + ], + "angle": 0, + "content": "5. Knowledge vs. Skills: Ensure each question explicitly evaluates a knowledge concept rather than testing skills or problem-solving techniques. Remove any questions that primarily evaluate skills instead of knowledge." + }, + { + "type": "list", + "bbox": [ + 0.178, + 0.514, + 0.819, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.68, + 0.411, + 0.698 + ], + "angle": 0, + "content": "F Reasoning Complexity" + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.72, + 0.585, + 0.736 + ], + "angle": 0, + "content": "Instruction Prompt to Solve Questions in Detailed Steps" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.741, + 0.346, + 0.756 + ], + "angle": 0, + "content": "<Image>" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.756, + 0.818, + 0.78 + ], + "angle": 0, + "content": "Solve this question with First Order Logic. Write out each thinking step explicitly, do not skip steps. In your response, begin each step with __STEP_START__" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.781, + 0.305, + 0.793 + ], + "angle": 0, + "content": "step <step_num>" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.821, + 0.541, + 0.838 + ], + "angle": 0, + "content": "G Comparison with Other Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.854, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Figure 9 provides a comparative analysis between VISUALPUZZLES and several widely-used benchmarks for multimodal reasoning, visualizing the knowledge requirement and reasoning complexity of each benchmark.
VISUALPUZZLES has high reasoning complexity and low knowledge requirement, with an aim to disentangle multimodal reasoning from domain-specific knowledge to evaluate general reasoning abilities in non-expert settings." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.101, + 0.825, + 0.257 + ], + "angle": 0, + "content": "
Dataset | Size | Reasoning Load | Knowledge Requirement | % Easy Words | Question Type | Answer Type
LogiQA | 0.7K | Heavy | Light | 52.0 | Text | Text
GSM8K | 8.5K | Heavy | Heavy | 54.0 | Text | Text
WikiDiverse | 0.8K | Light | Heavy | 35.8 | Image+Text | Text
MathVista | 6.1K | Heavy | Heavy | 51.9 | Image+Text | Text
MMMU | 11.5K | Heavy | Heavy | 46.4 | Image+Text | Text
MATH-Vision | 3.0K | Heavy | Heavy | 53.8 | Image+Text | Image+Text
MathVerse | 2.6K | Heavy | Heavy | 38.2 | Image+Text | Text
LogicBench | 1.5K | Heavy | Light | 53.6 | Text | Text
LogicVista | 0.4K | Heavy | Heavy | 41.2 | Image+Text | Image
NaturalBench | 10K | Light | Light | 52.5 | Image+Text | Text
VISUALPUZZLES | 1.2K | Heavy | Light | 54.1 | Image+Text | Image+Text
" + }, + { + "type": "table_caption", + "bbox": [ + 0.233, + 0.265, + 0.766, + 0.281 + ], + "angle": 0, + "content": "Table 9: Comparison of other existing benchmarks with VISUALPUZZLES" + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.301, + 0.662, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.183, + 0.455, + 0.816, + 0.473 + ], + "angle": 0, + "content": "Figure 9: Comparison between VISUALPUZZLES and several widely-used benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.502, + 0.828, + 0.588 + ], + "angle": 0, + "content": "Table 10 compare the performance of various model families across MathVista, MMMU, and VISUALPUZZLES. Both MathVista and MMMU are benchmarks that have a heavy emphasis on both knowledge and reasoning, whereas VISUALPUZZLES assess models on domain-disentangled multimodal reasoning alone. We found that success on knowledge-intensive multimodal reasoning benchmarks as MathVista and MMMU does not always carry over to VISUALPUZZLES that emphasize reasoning rather than extensive pre-trained knowledge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.613, + 0.396, + 0.631 + ], + "angle": 0, + "content": "H Additional Analysis" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.648, + 0.444, + 0.665 + ], + "angle": 0, + "content": "H.1 Proprietary V.S. Open Models" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.677, + 0.827, + 0.79 + ], + "angle": 0, + "content": "From Table 2, proprietary models (e.g., o4-mini and Claude-3.7-Sonnet) consistently achieve higher overall accuracy than most open-source models on VISUALPUZZLES. However, some open models also show competitive or even higher performance in both the overall accuracy and specific reasoning categories. For instance, Qwen2.5-VL-72B-Instruct demonstrates higher performance than GPT-4o on algorithmic reasoning, deductive reasoning, spatial reasoning, and overall accuracy. This indicates that while proprietary models currently have leading performance, open models are also rapidly improving on multimodal reasoning capabilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.812, + 0.53, + 0.829 + ], + "angle": 0, + "content": "H.2 Reasoning Category and Difficulty Levels" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Figure 11 and Figure 10 present complementary views of human accuracy against three representative models: o1 (one of the best-performing proprietary models), Qwen2.5-VL72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model). Specifically, Figure 10 compares performance across difficulty levels for each reasoning category, while Figure 11 compares performance across categories within each difficulty level." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.345, + 0.048 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "table", + "bbox": [ + 0.219, + 0.102, + 0.777, + 0.519 + ], + "angle": 0, + "content": "
Model | MathVista | MMMU | VISUALPUZZLES
Human | 60.3 | 88.6 | 80.1
o1 | 73.9 | 78.2 | 51.8
GPT-4o | 63.8 | 69.1 | 41.1
Gemini-2.0-Flash | - | 71.7 | 45.0
Gemini-1.5-Pro | 63.9 | 62.2 | 45.4
Claude-3.5-Sonnet | 67.7 | 68.3 | 42.4
Claude-3.7-Sonnet | - | 71.8 | 48.3
Claude-3.7-Sonnet (Thinking) | - | 75.0 | 48.3
LLaVA-1.5-7B | - | 36.2 | 26.9
LLaVA-1.5-13B | 27.6 | 36.4 | 27.6
LLaVA-NeXT-7B | 35.8 | 34.6 | 27.4
LLaVA-NeXT-13B | 36.2 | 35.3 | 25.3
LLaVA-NeXT-34B | 46.5 | 51.1 | 29.8
LLaVA-OV-0.5B | 34.8 | 31.4 | 27.2
LLaVA-OV-7B | 63.2 | 48.8 | 29.4
LLaVA-OV-72B | 67.5 | 56.8 | 31.8
Llama-3.2-11B-Vision-Instruct | 51.5 | 50.7 | 29.4
Llama-3.2-90B-Vision-Instruct | 57.3 | 60.3 | 34.3
Qwen2-VL-72B | 70.5 | 64.5 | 32.1
QvQ-72B-Preview | 71.4 | 70.3 | 37.9
Qwen2-VL-2B-Instruct | 43.0 | 41.1 | 31.3
Qwen2-VL-7B-Instruct | 58.2 | 54.1 | 30.2
Qwen2-VL-72B-Instruct | 70.5 | 64.5 | 34.9
Qwen2.5-VL-3B-Instruct | 62.3 | 53.1 | 31.2
Qwen2.5-VL-7B-Instruct | 68.2 | 58.6 | 33.7
Qwen2.5-VL-72B-Instruct | 74.8 | 70.2 | 42.3
Cambrian-8B | 49.0 | 42.7 | 28.5
Cambrian-13B | 48.0 | 40.0 | 27.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.174, + 0.529, + 0.823, + 0.556 + ], + "angle": 0, + "content": "Table 10: Comparison of other MathVista and MMMU with VISUALPUZZLES on human and SOTA models" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.604, + 0.824, + 0.675 + ], + "angle": 0, + "content": "Humans consistently outperform all models across categories and difficulty levels, often by large margins. Notably, human performance remains high and relatively stable in the algorithmic, deductive, and spatial categories, even on hard questions. While accuracy does decline in analogical and inductive reasoning as difficulty increases, humans still maintain a clear advantage over models." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.681, + 0.825, + 0.766 + ], + "angle": 0, + "content": "In contrast, model performance declines sharply as difficulty increases, especially for open-source models. Accuracy of Llama-3.2-90B-Vision-Instruct on hard analogical tasks drops to just \\(10\\%\\). Even one of the strongest proprietary models, o1, while more robust, still lags significantly behind humans, particularly on analogical, inductive, and spatial tasks. On easy tasks, some models perform competitively in certain categories, but this advantage largely disappears on medium and hard questions." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.771, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Interestingly, these models maintain a generally stable performance on algorithmic and deductive reasoning. For o1 and Qwen2.5-VL-72B-Instruct, their performances on algorithmic reasoning even go up for more difficult tasks, whereas human performance degraded as the difficulty level increases. However, all models, including o1, perform the worse at analogical, inductive and spatial reasoning in general, especially as the difficulty level increases. This suggests that models are relatively better at tasks requiring structured, rule-based algorithmic processing, while their performance degrades more steeply in tasks requiring relational abstraction (analogical), pattern induction (inductive), and visual understanding (spatial), particularly as the difficulty level increases. 
In summary, these results indicate that while some models exhibit promising performance on structured and easier reasoning tasks, multimodal models still struggle with abstract and complex reasoning, particularly" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.247, + 0.825, + 0.391 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.392, + 0.825, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.536, + 0.825, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.681, + 0.825, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.836, + 0.829, + 0.896 + ], + "angle": 0, + "content": "Figure 10: Comparison of accuracy across different reasoning categories for human participants, one of the best performing proprietary models o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama-3.2-90B-Vision-Instruct, measured on difficulty levels." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.825, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.264, + 0.825, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.428, + 0.825, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.6, + 0.829, + 0.66 + ], + "angle": 0, + "content": "Figure 11: Comparison of accuracy across different difficulty levels for human participants, one of the best performing proprietary models o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama3.2-90B-Vision-Instruct, measured across reasoning categories." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.714 + ], + "angle": 0, + "content": "when difficulty increases. Bridging the gap between model and human reasoning remains a critical challenge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.73, + 0.482, + 0.746 + ], + "angle": 0, + "content": "H.3 Option Types and Difficulty Levels" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.756, + 0.827, + 0.814 + ], + "angle": 0, + "content": "Figure 12 compares human accuracy against three representative models, o1 (one of the best-performing proprietary models), Qwen2.5-VL-72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model), across different difficulty levels, separately for textual and visual answer options." 
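The breakdown behind Figure 12 is a straightforward grouping of per-question correctness by option type and difficulty. The sketch below only illustrates that grouping; the results-file layout and the field names (option_type, difficulty, prediction, answer) are assumptions made for illustration, not the paper's actual evaluation code.

```python
from collections import defaultdict
import json

def accuracy_by_option_and_difficulty(results_path):
    """Group per-question correctness by (option type, difficulty).

    Assumes a JSON list of records with hypothetical fields
    "option_type" ("text"/"image"), "difficulty" ("easy"/"medium"/"hard"),
    "prediction", and "answer"; real field names may differ.
    """
    correct = defaultdict(int)
    total = defaultdict(int)
    with open(results_path) as f:
        for record in json.load(f):
            key = (record["option_type"], record["difficulty"])
            total[key] += 1
            correct[key] += int(record["prediction"] == record["answer"])
    # Accuracy (%) per (option type, difficulty) cell, as plotted in Figure 12.
    return {key: 100.0 * correct[key] / total[key] for key in total}

# Example with a hypothetical file name:
# print(accuracy_by_option_and_difficulty("qwen2.5-vl-72b_results.json"))
```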
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Across all participants and models, we observe a consistent pattern: text-based options result in higher accuracy than image-based options, with the performance gap widening as task difficulty increases. This trend holds even for human participants, whose accuracy drops from \\(92\\%\\) to \\(40\\%\\) on visual options when moving from easy to hard tasks, compared to a much smaller drop on text-based ones (\\(93\\%\\) to \\(73\\%\\))." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.927 + ], + "angle": 0, + "content": "For models, the gap is even more pronounced. For instance, Qwen2.5-VL-72B-Instruct achieves \\(58\\%\\) accuracy on hard questions with text options, but only \\(20\\%\\) when image" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.103, + 0.498, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.232, + 0.43, + 0.241 + ], + "angle": 0, + "content": "Owen2.5-VL-72B-Instruct" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.103, + 0.822, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.591, + 0.232, + 0.773, + 0.241 + ], + "angle": 0, + "content": "Llama-3.2-90B-Vision-Instruct" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.241, + 0.495, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.242, + 0.822, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.37, + 0.828, + 0.429 + ], + "angle": 0, + "content": "Figure 12: Comparison of accuracy across different difficulty levels for human participants, one of the best performing proprietary model o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama3.2-90B-Vision-Instruct, measured on textual v.s. visual option types." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.453, + 0.828, + 0.552 + ], + "angle": 0, + "content": "options are used. o1 and Llama-3.2-90B-Vision-Instruct exhibit similar drops, suggesting a broad weakness in multi-image reasoning and visual option discrimination. These findings suggest that image-based answer options introduce significant additional complexity, requiring models not just to understand the question but to reason over multiple visual cues. This capability is essential for real-world tasks such as product selection, recommendation, and visual planning, where their decision-making process often depends on comparing visual content." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.558, + 0.828, + 0.658 + ], + "angle": 0, + "content": "However, most pretraining datasets and benchmarks have traditionally emphasized textual QA formats, with far fewer examples involving visual options or structured visual comparisons. As a result, models may lack the inductive bias or learned attention mechanisms to handle visual alternatives effectively. 
These results highlight an important direction for future work: expanding and diversifying training corpora to include multi-choice visual reasoning tasks, and developing architectures that are explicitly designed to process and compare visual candidates, especially under challenging conditions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.673, + 0.407, + 0.691 + ], + "angle": 0, + "content": "H.4 Case Study of Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.699, + 0.827, + 0.743 + ], + "angle": 0, + "content": "Figure 13 shows a case study demonstrating the similarity in structure and reasoning strategy between Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking. Average textual similarity between model responses of these two models on VISUALPUZZLES is 0.9." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.759, + 0.327, + 0.775 + ], + "angle": 0, + "content": "H.5 Impact of CoT" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.465, + 0.925 + ], + "angle": 0, + "content": "Table 11 compares model performance under two prompting strategies: direct multiple-choice prompt vs. Chain-of-Thought (CoT) prompt. We observe that proprietary models and larger open models \\((\\geq 72\\mathrm{B})\\) benefit from CoT, while others show little to no improvement or even a decline in performance with CoT. For instance, both GPT-4o and Qwen2.5-VL-72B-Instruct show more than \\(20\\%\\) in" + }, + { + "type": "table", + "bbox": [ + 0.472, + 0.748, + 0.832, + 0.902 + ], + "angle": 0, + "content": "
Model | Direct | CoT
GPT-4o | 34.0 | 41.6
Gemini-1.5-Pro | 41.0 | 45.1
Claude-3.5-Sonnet | 40.0 | 42.5
Qwen2-VL-2B-Instruct | 31.3 | 26.1
Qwen2.5-VL-7B-Instruct | 33.7 | 32.0
Cambrian-13B | 27.4 | 26.5
LLaVA-NeXT-34B | 29.8 | 29.6
Qwen2.5-VL-72B-Instruct | 38.6 | 42.3
Llama-3.2-90B-Vision-Instruct | 33.3 | 33.9
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.471, + 0.903, + 0.829, + 0.934 + ], + "angle": 0, + "content": "Table 11: Comparison of models with Direct Multiple Choice and CoT prompting." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.103, + 0.824, + 0.436 + ], + "angle": 0, + "content": "
Question: Given a pair of chairs arranged at a specific angle, \nselect the option from the four provided that matches the angle of \nthe chairs in the question. (Answer: C)Claude-3.7-Sonnet (With Thinking) \nLet me analyze this problem carefully.
The question shows a “question pair” of chairs at the top of the image, and I need to find which of the four options (A, B, C, D) matches \nthe angle arrangement of those chairs.
Let me examine the question pair first:
- The question pair shows two folding chairs that appear to be facing inward toward each other at approximately a 90-degree angle (the \nchairs are at roughly right angles to each other).
- The chairs in the question pair are shown separated by a colon “:” symbol.
Claude-3.7-Sonnet (Without Thinking) \nI need to analyze the angle relationship between the pair of chairs \nin the question and compare it with the four options provided. \nLooking at the question image at the top: \n- The first chair (on the left) is shown at an angle, with its side \nprofile partially visible \n- The second chair (on the right) is shown more from the front, \nfacing more directly toward the viewer \n- The chairs are positioned at roughly a 90-degree angle to each \nother \nNow examining each option: \nOption A: Two simple chairs with backs. The left chair is shown \nfrom a side angle, while the right chair is shown more from the \nfront. Their orientation appears to be at approximately a 90-degree \nangle to each other. \nOption B: Two backless chairs/stools. The left chair has an angled \ndesign and is shown from the side, while the right chair is shown \nmore straight-on. The angle between them doesn’t appear to \nmatch the 90-degree relationship in the question. \nOption C: Two office/drafting chairs with wheels. Both chairs are \nshown from similar angles, with only slight variation in their \norientation. They don’t appear to have the same 90-degree \nrelationship as the question chairs. \nOption D: Two bar stools with round seats and circular bases. Both \nare shown from similar angles with minimal difference in orientation. \nThey don’t demonstrate the 90-degree relationship seen in the \nquestion. \nComparing all options to the question image, Option A most \nclosely matches the angular relationship between the two chairs in \nthe question. \nAnswer: ANow let me examine each option: \nOption A: \n- Shows two simple chairs with backrests \n- The chairs are facing each other at approximately a 90-degree angle \n- The arrangement looks similar to the question pair in terms of the angle
Option B: \n- Shows two chairs with different designs - one appears to be a more angled/tilted chair and the other is more upright \n- They don’t appear to be at the same angle relationship as the question pair
Option C: \n- Shows two office/drafting chairs with wheels and height adjustment \n- The chairs appear to be facing each other at approximately a 90-degree angle \n- The arrangement looks similar to the question pair in terms of angle
Option D: \n- Shows two bar-style chairs with round seats and central pedestals \n- The chairs appear to be directly facing each other (180-degree relationship) \n- This doesn’t match the angle relationship in the question pair
Based on the angle relationship, both Options A and C show chairs at approximately a 90-degree angle to each other, similar to the \nquestion pair. However, Option A’s chair style and arrangement seems closer to the question pair’s folding chairs.
So, looking at the angle between the chairs, which is the key aspect of this question, Options A and C both show approximately \n90-degree angles between chairs, while Option B shows chairs at different angles, and Option D shows chairs directly facing each other. \nSince the question specifically asks about the angle match, I need to determine which option best matches the exact angle of the \nquestion pair.
Looking closely again: \nThe question pair shows chairs that are facing slightly inward, at approximately 90 degrees. \nOption A shows a similar angle relationship of roughly 90 degrees. \nOption C also shows a similar angle relationship of roughly 90 degrees. \nThis is a bit challenging with the 2D representation, but I think Option A most closely matches the angle relationship in the question pair.
Answer: A
" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.44, + 0.825, + 0.482 + ], + "angle": 0, + "content": "Figure 13: Case Study showing the similarity in structure and reasoning strategy between Claude-3.7-Sonnet-Thinking and Claude-3.7-Sonnet. Similarity between model responses of these two models on VISUALPUZZLES is 0.9." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.509, + 0.827, + 0.581 + ], + "angle": 0, + "content": "creases in performance when using CoT. In contrast, several smaller models, such as Qwen2-VL-2B-Instruct and Cambrian-13B, exhibit decreased accuracy with CoT prompting. These results suggest that CoT can indeed enhance the reasoning capability of larger models whereas it may introduce unnecessary complexity or confusion for smaller models and thus decreasing performance." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.307, + 0.121 + ], + "angle": 0, + "content": "I Case Study" + }, + { + "type": "title", + "bbox": [ + 0.421, + 0.151, + 0.571, + 0.168 + ], + "angle": 0, + "content": "Algorithmic (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.597, + 0.152, + 0.617, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.176, + 0.409, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.193, + 0.807, + 0.219 + ], + "angle": 0, + "content": "Question: There are seven squares in the picture. How many more triangles than squares are there in the picture?" + }, + { + "type": "text", + "bbox": [ + 0.423, + 0.22, + 0.609, + 0.233 + ], + "angle": 0, + "content": "Options: (A) 1 (B) 2 (C) 3 (D) 4" + }, + { + "type": "text", + "bbox": [ + 0.423, + 0.239, + 0.538, + 0.253 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.277, + 0.82, + 0.304 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: I need to count the number of squares and triangles in the given picture, and then find how many more triangles there are than squares." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.315, + 0.366, + 0.327 + ], + "angle": 0, + "content": "First, let me count the squares:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.328, + 0.684, + 0.342 + ], + "angle": 0, + "content": "1. I can see 7 small squares arranged in a triangular pattern within the large triangle." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.352, + 0.373, + 0.365 + ], + "angle": 0, + "content": "Now, let me count the triangles:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.366, + 0.53, + 0.378 + ], + "angle": 0, + "content": "1. There's the large outer triangle that contains everything." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.378, + 0.694, + 0.391 + ], + "angle": 0, + "content": "2. There are small triangles formed by the division of the large triangle by the squares." 
+ }, + { + "type": "text", + "bbox": [ + 0.18, + 0.403, + 0.445, + 0.416 + ], + "angle": 0, + "content": "Let me count these small triangles carefully:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.416, + 0.692, + 0.428 + ], + "angle": 0, + "content": "- Along the bottom row, I see 4 triangles (formed by the squares and the bottom edge)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.429, + 0.748, + 0.44 + ], + "angle": 0, + "content": "- On the left side of the large triangle, I see 3 triangles (formed by the squares and the left edge)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.441, + 0.767, + 0.453 + ], + "angle": 0, + "content": "- On the right side of the large triangle, I see 3 triangles (formed by the squares and the right edge)" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.454, + 0.498, + 0.466 + ], + "angle": 0, + "content": "- Don't forget the large outer triangle itself: 1 triangle" + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.403, + 0.767, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.477, + 0.405, + 0.491 + ], + "angle": 0, + "content": "So in total: \\( 4 + 3 + 3 + 1 = 11 \\) triangles" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.502, + 0.361, + 0.514 + ], + "angle": 0, + "content": "Now I can find the difference:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.515, + 0.498, + 0.528 + ], + "angle": 0, + "content": "Number of triangles - Number of squares \\(= 11 - 7 = 4\\)" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.54, + 0.517, + 0.554 + ], + "angle": 0, + "content": "So there are 4 more triangles than squares in the picture." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.565, + 0.439, + 0.578 + ], + "angle": 0, + "content": "Looking at the options, the answer is (D) 4." + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.578, + 0.279, + 0.59 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.59, + 0.811, + 0.604 + ], + "angle": 0, + "content": "Looking at the image, I need to count the total number of squares and triangles, then find their difference." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.615, + 0.343, + 0.627 + ], + "angle": 0, + "content": "First, I'll count the squares:" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.628, + 0.675, + 0.641 + ], + "angle": 0, + "content": "- I can see 7 small squares arranged in a triangular pattern inside the large triangle." + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.652, + 0.352, + 0.665 + ], + "angle": 0, + "content": "Next, I'll count the triangles:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.666, + 0.535, + 0.678 + ], + "angle": 0, + "content": "1. There is the large outer triangle that contains everything." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.679, + 0.615, + 0.69 + ], + "angle": 0, + "content": "2. Along the bottom edge of the large triangle, there are 4 small triangles." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.691, + 0.592, + 0.703 + ], + "angle": 0, + "content": "3. Along the left edge of the large triangle, there are 3 small triangles." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.704, + 0.602, + 0.716 + ], + "angle": 0, + "content": "4. Along the right edge of the large triangle, there are 3 small triangles." 
+ }, + { + "type": "list", + "bbox": [ + 0.18, + 0.666, + 0.615, + 0.716 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.727, + 0.504, + 0.741 + ], + "angle": 0, + "content": "Total number of triangles \\(= 1 + 4 + 3 + 3 = 11\\) triangles" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.752, + 0.595, + 0.765 + ], + "angle": 0, + "content": "Now I can calculate how many more triangles than squares there are:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.766, + 0.429, + 0.778 + ], + "angle": 0, + "content": "11 triangles - 7 squares = 4 more triangles" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.79, + 0.248, + 0.801 + ], + "angle": 0, + "content": "Answer: D" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.807, + 0.378, + 0.82 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.82, + 0.819, + 0.87 + ], + "angle": 0, + "content": "Error Reason: The model gets the answer wrong because it miscounts the number of triangles, specifically double-counting the triangles formed by small triangles and squares. The response incorrectly conclude there are 11 triangles, but a careful manual inspection shows only 10 distinct triangles, making the correct difference 3, not 4." + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.89, + 0.769, + 0.907 + ], + "angle": 0, + "content": "Figure 14: A sample error case of Algorithmic Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.421, + 0.112, + 0.57, + 0.13 + ], + "angle": 0, + "content": "Algorithmic (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.161, + 0.272, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.204, + 0.304, + 0.216 + ], + "angle": 0, + "content": "(B)" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.149, + 0.366, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.385, + 0.149, + 0.471, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.482, + 0.204, + 0.499, + 0.216 + ], + "angle": 0, + "content": "(D)" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.149, + 0.562, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.568, + 0.142, + 0.794, + 0.203 + ], + "angle": 0, + "content": "Question: In the Kangaroo constellation, all stars have a number greater than 3 and their sum is 20. Which option shows the Kangaroo constellation?" + }, + { + "type": "text", + "bbox": [ + 0.569, + 0.209, + 0.681, + 0.223 + ], + "angle": 0, + "content": "Ground Truth: (B)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.236, + 0.812, + 0.258 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: The question asks about the Kangaroo constellation, which has the following properties:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.258, + 0.382, + 0.268 + ], + "angle": 0, + "content": "1. All stars have a number greater than 3" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.268, + 0.338, + 0.277 + ], + "angle": 0, + "content": "2. 
The sum of all numbers is 20" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.258, + 0.382, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.287, + 0.516, + 0.299 + ], + "angle": 0, + "content": "Let me check each option to see which one satisfies these conditions:" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.308, + 0.244, + 0.318 + ], + "angle": 0, + "content": "Option (A):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.319, + 0.316, + 0.328 + ], + "angle": 0, + "content": "Stars have values: 5, 4, 7, 6" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.329, + 0.46, + 0.338 + ], + "angle": 0, + "content": "- Are all numbers greater than 3? Yes (5, 4, 7, 6 are all \\(>3\\))" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.339, + 0.304, + 0.347 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 4 + 7 + 6 = {22} \\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.329, + 0.46, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.348, + 0.392, + 0.358 + ], + "angle": 0, + "content": "This doesn't match the required sum of 20." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.368, + 0.243, + 0.378 + ], + "angle": 0, + "content": "Option (B):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.379, + 0.305, + 0.387 + ], + "angle": 0, + "content": "Stars have values: 5, 8, 7" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.389, + 0.449, + 0.398 + ], + "angle": 0, + "content": "- Are all numbers greater than 3? Yes (5, 8, 7 are all \\(>3\\))" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.399, + 0.286, + 0.407 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 8 + 7 = {20} \\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.379, + 0.449, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.408, + 0.575, + 0.419 + ], + "angle": 0, + "content": "This matches both conditions: all numbers are greater than 3, and they sum to 20." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.428, + 0.243, + 0.438 + ], + "angle": 0, + "content": "Option (C):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.439, + 0.326, + 0.447 + ], + "angle": 0, + "content": "Stars have values: 5, 3, 7, 2, 8" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.448, + 0.46, + 0.458 + ], + "angle": 0, + "content": "- Are all numbers greater than 3? No (3 and 2 are not \\(>3\\))" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.459, + 0.32, + 0.466 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 3 + 7 + 2 + 8 = {25} \\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.448, + 0.46, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.468, + 0.59, + 0.479 + ], + "angle": 0, + "content": "This doesn't satisfy the first condition about all stars having numbers greater than 3." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.488, + 0.243, + 0.498 + ], + "angle": 0, + "content": "Option (D):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.499, + 0.315, + 0.507 + ], + "angle": 0, + "content": "Stars have values: 5, 1, 4, 9" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.508, + 0.424, + 0.518 + ], + "angle": 0, + "content": "- Are all numbers greater than 3? 
No (1 is not \\(>3\\))" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.519, + 0.304, + 0.527 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 1 + 4 + 9 = {19} \\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.528, + 0.361, + 0.538 + ], + "angle": 0, + "content": "This doesn't satisfy either condition." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.508, + 0.424, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.548, + 0.543, + 0.558 + ], + "angle": 0, + "content": "So, only Option (B) satisfies both conditions of the Kangaroo constellation." + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.559, + 0.265, + 0.568 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.568, + 0.411, + 0.578 + ], + "angle": 0, + "content": "Solving the Kangaroo Constellation Problem" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.587, + 0.635, + 0.599 + ], + "angle": 0, + "content": "I need to identify the constellation where all stars have numbers greater than 3 and sum to 20." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.607, + 0.326, + 0.619 + ], + "angle": 0, + "content": "Let me examine each option:" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.628, + 0.257, + 0.638 + ], + "angle": 0, + "content": "Option (A)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.639, + 0.256, + 0.647 + ], + "angle": 0, + "content": "Stars: 5, 4, 7, 6" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.649, + 0.296, + 0.657 + ], + "angle": 0, + "content": "- All numbers \\( > 3 \\)? Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.658, + 0.303, + 0.666 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 4 + 7 + 6 = {22} \\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.649, + 0.303, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.668, + 0.37, + 0.678 + ], + "angle": 0, + "content": "Doesn't match the required sum of 20." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.688, + 0.256, + 0.698 + ], + "angle": 0, + "content": "Option (B)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.244, + 0.707 + ], + "angle": 0, + "content": "Stars: 5, 8, 7" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.709, + 0.295, + 0.717 + ], + "angle": 0, + "content": "- All numbers \\( > 3 \\)? Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.718, + 0.286, + 0.726 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 8 + 7 = {20} \\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.728, + 0.323, + 0.738 + ], + "angle": 0, + "content": "Matches both requirements!" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.748, + 0.256, + 0.758 + ], + "angle": 0, + "content": "Option (C)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.759, + 0.266, + 0.767 + ], + "angle": 0, + "content": "Stars: 5, 3, 7, 2, 8" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.768, + 0.372, + 0.777 + ], + "angle": 0, + "content": "- All numbers \\( > 3 \\)? No (3 and 2 are \\( \\leq 3 \\))" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.778, + 0.321, + 0.787 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 3 + 7 + 2 + 8 = 25 \\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.768, + 0.372, + 0.787 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.789, + 0.349, + 0.798 + ], + "angle": 0, + "content": "Doesn't satisfy the first condition." 
+ }, + { + "type": "title", + "bbox": [ + 0.186, + 0.808, + 0.257, + 0.819 + ], + "angle": 0, + "content": "Option (D)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.82, + 0.256, + 0.828 + ], + "angle": 0, + "content": "Stars: 5, 1, 4, 9" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.829, + 0.336, + 0.838 + ], + "angle": 0, + "content": "- All numbers \\( > 3 \\)? No (1 is \\( \\leq 3 \\))" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.839, + 0.303, + 0.847 + ], + "angle": 0, + "content": "- Sum: \\( 5 + 1 + 4 + 9 = {19} \\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.848, + 0.339, + 0.858 + ], + "angle": 0, + "content": "Doesn't satisfy either condition." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.867, + 0.71, + 0.879 + ], + "angle": 0, + "content": "Only option (B) satisfies both requirements: all stars have numbers greater than 3, and their sum is exactly 20." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.888, + 0.24, + 0.897 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.913, + 0.776, + 0.93 + ], + "angle": 0, + "content": "Figure 15: A sample correct case of Algorithmic Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.406, + 0.112, + 0.584, + 0.129 + ], + "angle": 0, + "content": "Algorithmic (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.593, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.158, + 0.546, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.57, + 0.143, + 0.791, + 0.168 + ], + "angle": 0, + "content": "Question: How many left hands appear in the image?" + }, + { + "type": "text", + "bbox": [ + 0.572, + 0.168, + 0.753, + 0.181 + ], + "angle": 0, + "content": "Options: (A) 1 (B) 8 (C) 4 (D) 5" + }, + { + "type": "text", + "bbox": [ + 0.571, + 0.187, + 0.683, + 0.201 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.22, + 0.413, + 0.232 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: " + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.232, + 0.81, + 0.256 + ], + "angle": 0, + "content": "Looking at the image, I can see approximately 10-12 hand icons/emoji in a row. They're all yellow (with one that appears to be skin-colored), and they show various hand gestures." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.265, + 0.81, + 0.289 + ], + "angle": 0, + "content": "To determine which ones are left hands, I need to look at the thumb position. In a left hand, the thumb would typically be on the right side when the palm is facing forward." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.298, + 0.439, + 0.311 + ], + "angle": 0, + "content": "Let me go through each hand from left to right:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.321, + 0.597, + 0.332 + ], + "angle": 0, + "content": "1. Thumbs down gesture - appears to be a left hand with thumb on right side" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.333, + 0.634, + 0.343 + ], + "angle": 0, + "content": "2. 
Fist or closed hand - orientation is harder to determine but seems to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.344, + 0.488, + 0.354 + ], + "angle": 0, + "content": "3. Open hand or waving hand - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.355, + 0.487, + 0.365 + ], + "angle": 0, + "content": "4. Hand pointing or gesturing - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.366, + 0.507, + 0.377 + ], + "angle": 0, + "content": "5. Open hand with fingers spread - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.377, + 0.747, + 0.388 + ], + "angle": 0, + "content": "6. Fist with extended thumb (like \"thumbs up\" but horizontal) - appears to be a right hand (thumb on left)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.389, + 0.507, + 0.399 + ], + "angle": 0, + "content": "7. Open hand with fingers spread - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.4, + 0.486, + 0.41 + ], + "angle": 0, + "content": "8. Hand making \"OK\" gesture - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.411, + 0.472, + 0.421 + ], + "angle": 0, + "content": "9. Victory/peace sign hand - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.422, + 0.369, + 0.432 + ], + "angle": 0, + "content": "10. Ear or side of face (not a hand)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.433, + 0.501, + 0.444 + ], + "angle": 0, + "content": "11. Hand pointing or gesturing - appears to be a right hand" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.321, + 0.747, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.454, + 0.712, + 0.467 + ], + "angle": 0, + "content": "So counting the left hands, I see approximately 8 left hands. This would correspond to option (B) 8." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.476, + 0.81, + 0.499 + ], + "angle": 0, + "content": "However, I want to be careful because determining left vs. right can sometimes be ambiguous in emoji or simplistic hand representations. Let me double check my count." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.509, + 0.81, + 0.544 + ], + "angle": 0, + "content": "Looking again, I see the thumbs down, several open hands, some gesture hands, and a few that are definitely left hands based on thumb position. The total count of left hands appears to be around 8, though there's some ambiguity with a couple of the representations." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.554, + 0.471, + 0.566 + ], + "angle": 0, + "content": "Based on my analysis, the answer appears to be (B) 8." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.567, + 0.274, + 0.578 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.578, + 0.724, + 0.589 + ], + "angle": 0, + "content": "To solve this question, I need to examine the image carefully and identify which hands are left hands." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.598, + 0.81, + 0.633 + ], + "angle": 0, + "content": "Looking at the row of yellow hand emojis, I need to determine whether each is a left or right hand. For hand emojis, I can identify a left hand by noticing the thumb position - on a left hand, the thumb will typically appear on the right side when the palm is facing forward." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.644, + 0.421, + 0.655 + ], + "angle": 0, + "content": "Going through each emoji from left to right:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.656, + 0.501, + 0.666 + ], + "angle": 0, + "content": "1. Thumbs down - this is a left hand (thumb is on the right)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.667, + 0.36, + 0.676 + ], + "angle": 0, + "content": "2. Fist - appears to be a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.677, + 0.372, + 0.687 + ], + "angle": 0, + "content": "3. Waving hand - this is a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.688, + 0.376, + 0.698 + ], + "angle": 0, + "content": "4. Pointing hand - this is a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.467, + 0.709 + ], + "angle": 0, + "content": "5. Open hand with fingers spread - this is a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.71, + 0.591, + 0.721 + ], + "angle": 0, + "content": "6. Fist with extended thumb - this appears to be a right hand (thumb on left)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.722, + 0.36, + 0.731 + ], + "angle": 0, + "content": "7. Open hand - this is a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.732, + 0.391, + 0.742 + ], + "angle": 0, + "content": "8. OK gesture hand - this is a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.743, + 0.387, + 0.754 + ], + "angle": 0, + "content": "9. Peace sign hand - this is a left hand" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.755, + 0.324, + 0.765 + ], + "angle": 0, + "content": "10. Ear emoji (not a hand)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.766, + 0.359, + 0.777 + ], + "angle": 0, + "content": "11. Pointing gesture - right hand" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.656, + 0.591, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.787, + 0.388, + 0.8 + ], + "angle": 0, + "content": "I count 8 left hands total in the image." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.809, + 0.245, + 0.82 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.835, + 0.44, + 0.847 + ], + "angle": 0, + "content": "Error Category: Spatial / Orientation Error" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.848, + 0.81, + 0.896 + ], + "angle": 0, + "content": "Error Reason: The model makes a orientation error by misinterpreting the thumb positions in several hand emojis, leading it to incorrectly classify some right hands as left hands. For example, it counts the pointing hand in the third emoji (which is clearly a right hand) as a left hand. This misclassification results in an overcount of left hands." + }, + { + "type": "image_caption", + "bbox": [ + 0.213, + 0.913, + 0.783, + 0.93 + ], + "angle": 0, + "content": "Figure 16: A sample error case of Algorithmic Reasoning (difficulty: Medium)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.345, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.406, + 0.112, + 0.584, + 0.129 + ], + "angle": 0, + "content": "Algorithmic (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.134, + 0.357, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.379, + 0.161, + 0.793, + 0.213 + ], + "angle": 0, + "content": "Question: The entire pie seen in the image is divided among several children. Each child receives a piece of pie, and each piece of pie looks identical. What is the maximum possible number of children there? Options: (A) 9 (B) 8 (C) 10 (D) 4" + }, + { + "type": "text", + "bbox": [ + 0.381, + 0.226, + 0.493, + 0.24 + ], + "angle": 0, + "content": "Ground Truth: (B)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.265, + 0.813, + 0.304 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: I need to determine how many equal-sized pieces this pie can be divided into. Since each child will receive an identical piece, I need to count the maximum number of equal portions visible in the image." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.314, + 0.81, + 0.34 + ], + "angle": 0, + "content": "Looking at the image, I can see that this appears to be an unusual-looking pie with different colored sections arranged in a circular pattern. It's not a typical pie with simple wedge-shaped slices." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.35, + 0.81, + 0.389 + ], + "angle": 0, + "content": "From what I can observe, there appears to be a pattern of colored shapes (green, light green, blue, and red) arranged in a circle. To determine how many equal pieces there are, I need to identify the repeating pattern and count how many times it repeats around the circle." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.4, + 0.81, + 0.437 + ], + "angle": 0, + "content": "The pie seems to have a symmetrical arrangement. Looking carefully, I can see that there appear to be 8 sections around the outer ring that follow a similar pattern. I can see repeated patterns of green and light green shapes with blue and red accents between them." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.449, + 0.324, + 0.461 + ], + "angle": 0, + "content": "Looking at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.462, + 0.22, + 0.473 + ], + "angle": 0, + "content": "(A) 9" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.474, + 0.217, + 0.485 + ], + "angle": 0, + "content": "(B) 8" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.486, + 0.225, + 0.498 + ], + "angle": 0, + "content": "(C) 10" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.499, + 0.218, + 0.51 + ], + "angle": 0, + "content": "(D) 4" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.462, + 0.225, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.522, + 0.81, + 0.547 + ], + "angle": 0, + "content": "Based on my observation of the image, there appear to be 8 similar sections arranged around the circle. This would suggest that option (B) 8 is the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.558, + 0.81, + 0.585 + ], + "angle": 0, + "content": "However, I should double-check by looking for any other patterns or symmetries in the image. 
It's possible that each section could be further divided, or that there's a different way to interpret the equal pieces." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.595, + 0.81, + 0.62 + ], + "angle": 0, + "content": "After careful examination, I'm confident that there are 8 identical sections arranged in a circular pattern. Therefore, the maximum number of children who could receive identical pieces of pie would be 8." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.621, + 0.282, + 0.633 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.633, + 0.483, + 0.645 + ], + "angle": 0, + "content": "Determining the Maximum Number of Children" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.656, + 0.81, + 0.681 + ], + "angle": 0, + "content": "To solve this problem, I need to examine the pie in the image and determine how many identical pieces it can be divided into." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.693, + 0.318, + 0.705 + ], + "angle": 0, + "content": "Analysis of the Pie" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.706, + 0.81, + 0.731 + ], + "angle": 0, + "content": "Looking at the image, I can see a circular pie with a pattern of colored sections. The key is to identify how many identical sections are present or could be created." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.742, + 0.81, + 0.779 + ], + "angle": 0, + "content": "Upon careful examination, I notice that the pie has a symmetrical arrangement with a repeating pattern. The colorful segments (green, light green, red, and blue shapes) appear to form a consistent pattern around the circle." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.791, + 0.81, + 0.817 + ], + "angle": 0, + "content": "Counting these repeating sections carefully, I can identify 8 identical portions around the circumference of the pie. Each of these sections follows the same pattern of colored shapes." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.828, + 0.275, + 0.839 + ], + "angle": 0, + "content": "Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.84, + 0.81, + 0.865 + ], + "angle": 0, + "content": "Since each child must receive an identical piece, and there are 8 identical sections visible in the pie, the maximum possible number of children would be 8." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.876, + 0.251, + 0.888 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.913, + 0.789, + 0.93 + ], + "angle": 0, + "content": "Figure 17: A sample correct case of Algorithmic Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.419, + 0.112, + 0.571, + 0.13 + ], + "angle": 0, + "content": "Algorithmic (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.135, + 0.351, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.367, + 0.153, + 0.792, + 0.178 + ], + "angle": 0, + "content": "Question: There are 53 residents living in the apartment. How many people live on the second floor?" 
+ }, + { + "type": "text", + "bbox": [ + 0.368, + 0.179, + 0.564, + 0.192 + ], + "angle": 0, + "content": "Options: (A) 5 (B) 8 (C) 10 (D) 13" + }, + { + "type": "text", + "bbox": [ + 0.368, + 0.198, + 0.482, + 0.213 + ], + "angle": 0, + "content": "Ground Truth: (A)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.248, + 0.813, + 0.266 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: This image shows an apartment building with different sections labeled with numbers. Let me analyze what the numbers represent:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.271, + 0.364, + 0.28 + ], + "angle": 0, + "content": "- The building appears to have multiple floors." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.28, + 0.42, + 0.288 + ], + "angle": 0, + "content": "- The number 27 is shown on the left side of one section (blue)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.288, + 0.53, + 0.295 + ], + "angle": 0, + "content": "- The number 23 is shown on the right side of a section (also seems to cover two blue floors)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.295, + 0.486, + 0.303 + ], + "angle": 0, + "content": "- The number 18 is shown on the left side of a pink/red section marked with \"?\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.303, + 0.421, + 0.31 + ], + "angle": 0, + "content": "- The number 13 is shown on the right side of the bottom floor." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.271, + 0.53, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.318, + 0.742, + 0.328 + ], + "angle": 0, + "content": "The question asks how many people live on the second floor, but I need to determine which floor is the second floor and how many people live there." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.333, + 0.351, + 0.342 + ], + "angle": 0, + "content": "Looking at the building from bottom to top:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.342, + 0.43, + 0.349 + ], + "angle": 0, + "content": "1. Bottom floor (blue with windows): labeled with 13 on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.349, + 0.419, + 0.357 + ], + "angle": 0, + "content": "2. Second floor (pink/red with \"?\"): labeled with 18 on the left" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.357, + 0.444, + 0.365 + ], + "angle": 0, + "content": "3. Two middle floors (blue with windows): part of the 23 on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.365, + 0.411, + 0.373 + ], + "angle": 0, + "content": "4. Top floor (blue with windows): labeled with 27 on the left" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.342, + 0.444, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.38, + 0.813, + 0.397 + ], + "angle": 0, + "content": "So it seems the second floor from the bottom is the one marked with \\( ?^{\\prime \\prime} \\) and has 18 on the left side. But the question asks specifically how many people live on the second floor, not how many are associated with the label." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.403, + 0.58, + 0.413 + ], + "angle": 0, + "content": "Let me think about this more carefully. The total number of residents is 53. 
The numbers shown might be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.413, + 0.334, + 0.42 + ], + "angle": 0, + "content": "- 27: Number of people on the top floor" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.42, + 0.408, + 0.428 + ], + "angle": 0, + "content": "- 23: Number of people on the two middle floors combined" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.428, + 0.4, + 0.436 + ], + "angle": 0, + "content": "- 18: Number of people on the second floor" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.436, + 0.349, + 0.443 + ], + "angle": 0, + "content": "- 13: Number of people on the bottom floor" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.413, + 0.408, + 0.443 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.45, + 0.458, + 0.459 + ], + "angle": 0, + "content": "Let's check if these add up: \\(27 + 23 + 18 + 13 = 81\\), which is more than 53." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.466, + 0.425, + 0.475 + ], + "angle": 0, + "content": "Wait, I think I'm misinterpreting. The numbers might represent:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.475, + 0.323, + 0.482 + ], + "angle": 0, + "content": "- 27: People on left side (top section)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.482, + 0.35, + 0.49 + ], + "angle": 0, + "content": "- 23: People on right side (upper two floors)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.49, + 0.332, + 0.498 + ], + "angle": 0, + "content": "- 18: People on left side (lower section)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.498, + 0.335, + 0.506 + ], + "angle": 0, + "content": "- 13: People on right side (bottom floor)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.475, + 0.35, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.512, + 0.657, + 0.522 + ], + "angle": 0, + "content": "So \\(27 + 18 = 45\\) people on the left, and \\(23 + 13 = 36\\) people on the right. That's still \\(45 + 36 = 81\\) people, which doesn't match 53." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.528, + 0.812, + 0.546 + ], + "angle": 0, + "content": "Let me try a different approach. Maybe the 23 represents the number of people on the right side of the building (covering multiple floors), and 18 represents people on another part." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.551, + 0.391, + 0.56 + ], + "angle": 0, + "content": "Actually, looking at the diagram again, it appears that:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.56, + 0.306, + 0.567 + ], + "angle": 0, + "content": "- 27: Left side of the top section" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.567, + 0.33, + 0.575 + ], + "angle": 0, + "content": "- 23: Right side of the top two sections" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.575, + 0.318, + 0.583 + ], + "angle": 0, + "content": "- 18: Left side of the bottom section" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.583, + 0.324, + 0.59 + ], + "angle": 0, + "content": "- 13: Right side of the bottom section" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.56, + 0.33, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.598, + 0.672, + 0.608 + ], + "angle": 0, + "content": "Given the total is 53 residents, and we're trying to find the number on the second floor, let's try to interpret the diagram differently." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.613, + 0.368, + 0.622 + ], + "angle": 0, + "content": "The first (bottom) floor seems to have 13 people." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.622, + 0.319, + 0.63 + ], + "angle": 0, + "content": "The top appears to have 27 people." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.63, + 0.472, + 0.638 + ], + "angle": 0, + "content": "That leaves \\(53 - 13 - 27 = 13\\) people for the second and third floors combined." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.644, + 0.812, + 0.662 + ], + "angle": 0, + "content": "I'm still uncertain, but based on my calculations and the available options, it seems like (D) 13 might be the answer if we interpret the second floor as having all the remaining residents after accounting for the top and bottom floors." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.662, + 0.249, + 0.67 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.67, + 0.352, + 0.677 + ], + "angle": 0, + "content": "Solving the Apartment Residents Problem" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.644, + 0.812, + 0.677 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.683, + 0.612, + 0.693 + ], + "angle": 0, + "content": "Looking at the image, I need to determine how many residents live on the second floor of this apartment building." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.699, + 0.395, + 0.707 + ], + "angle": 0, + "content": "I can see a diagram of a building with several numbers:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.707, + 0.339, + 0.715 + ], + "angle": 0, + "content": "- The top section has \"27\" on the left side" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.715, + 0.375, + 0.723 + ], + "angle": 0, + "content": "- The middle two floors have \"23\" on the right side" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.723, + 0.36, + 0.73 + ], + "angle": 0, + "content": "- There's a section marked with \"18\" on the left." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.731, + 0.352, + 0.739 + ], + "angle": 0, + "content": "- The bottom floor has \"13\" on the right side" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.739, + 0.496, + 0.747 + ], + "angle": 0, + "content": "- The second floor from the bottom is marked with a \"?\" - this is what I need to find" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.707, + 0.496, + 0.747 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.754, + 0.51, + 0.764 + ], + "angle": 0, + "content": "I notice the question states there are 53 residents total living in the apartment building." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.769, + 0.269, + 0.777 + ], + "angle": 0, + "content": "Let me work this out:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.778, + 0.35, + 0.785 + ], + "angle": 0, + "content": "- The top floor appears to have 27 residents" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.785, + 0.365, + 0.793 + ], + "angle": 0, + "content": "- The bottom floor appears to have 13 residents" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.793, + 0.49, + 0.801 + ], + "angle": 0, + "content": "- That means the middle two floors together must have: \\( {53} - {27} - {13} = {13} \\) residents" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.778, + 0.49, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.808, + 0.712, + 0.818 + ], + "angle": 0, + "content": "Looking closely at the image, the second floor is specifically marked with a \"?\" and there are numbers on the sides indicating 13 for that floor." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.823, + 0.352, + 0.832 + ], + "angle": 0, + "content": "Therefore, the second floor has 13 residents." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.839, + 0.23, + 0.847 + ], + "angle": 0, + "content": "Answer: D" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.85, + 0.408, + 0.863 + ], + "angle": 0, + "content": "Error Category: Understanding Error" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.863, + 0.813, + 0.9 + ], + "angle": 0, + "content": "Error Reason: The model makes an image understanding error by misunderstanding what is the meaning of the brackets and numbers, despite all the branching and revalidation. This misunderstanding causes it to iteratively try to understand the meaning of the numbers and ultimately select the wrong answer." + }, + { + "type": "image_caption", + "bbox": [ + 0.226, + 0.913, + 0.771, + 0.93 + ], + "angle": 0, + "content": "Figure 18: A sample error case of Algorithmic Reasoning (difficulty: Hard)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.419, + 0.112, + 0.57, + 0.13 + ], + "angle": 0, + "content": "Algorithmic (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.137, + 0.317, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.354, + 0.145, + 0.793, + 0.181 + ], + "angle": 0, + "content": "Question: A bird can only move horizontally or vertically towards a monkey in the grid. The number of ways that the bird can reach the monkey is:" + }, + { + "type": "text", + "bbox": [ + 0.356, + 0.182, + 0.565, + 0.196 + ], + "angle": 0, + "content": "Options: (A) 15 (B) 14 (C) 18 (D) 12" + }, + { + "type": "text", + "bbox": [ + 0.356, + 0.204, + 0.469, + 0.218 + ], + "angle": 0, + "content": "Ground Truth: (A)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.231, + 0.812, + 0.256 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Looking at the grid, I need to identify the positions of the bird and the monkey." 
+ }, + { + "type": "text", + "bbox": [ + 0.184, + 0.264, + 0.813, + 0.289 + ], + "angle": 0, + "content": "The bird appears to be at the top of the grid in the second column, specifically at position \\((0,1)\\) if we use a zero-indexed grid counting from the top-left." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.298, + 0.767, + 0.311 + ], + "angle": 0, + "content": "The monkey appears to be at the bottom-right of the grid, at position \\((4,3)\\) using the same coordinate system." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.32, + 0.81, + 0.356 + ], + "angle": 0, + "content": "To count the number of ways the bird can reach the monkey, I need to consider that the bird can only move horizontally or vertically. This means the bird can only move right or down to reach the monkey from its starting position." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.364, + 0.794, + 0.378 + ], + "angle": 0, + "content": "The total number of valid paths will be the number of ways to arrange the required horizontal and vertical moves." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.387, + 0.466, + 0.398 + ], + "angle": 0, + "content": "From position \\((0,1)\\) to \\((4,3)\\), the bird needs to move:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.399, + 0.381, + 0.41 + ], + "angle": 0, + "content": "- 4 steps down (from row 0 to row 4)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.41, + 0.415, + 0.422 + ], + "angle": 0, + "content": "- 2 steps right (from column 1 to column 3)" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.399, + 0.415, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.431, + 0.694, + 0.444 + ], + "angle": 0, + "content": "So in total, the bird needs to make \\( 4 + 2 = 6 \\) moves, with 4 being vertical and 2 being horizontal." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.454, + 0.623, + 0.465 + ], + "angle": 0, + "content": "The number of ways to arrange these moves is given by the combination formula:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.465, + 0.645, + 0.477 + ], + "angle": 0, + "content": "C(total moves, horizontal moves) = C(6, 2) = 6! / (2! * 4!) = (6 * 5) / (2 * 1) = 30 / 2 = 15" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.487, + 0.578, + 0.499 + ], + "angle": 0, + "content": "Alternatively, you can calculate \\( C(6, 4) \\) which would give the same result:" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.499, + 0.323, + 0.51 + ], + "angle": 0, + "content": "\\[\nC (6, 4) = 6! / (4! * 2!) = 1 5\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.52, + 0.521, + 0.533 + ], + "angle": 0, + "content": "So there are 15 different ways for the bird to reach the monkey." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.542, + 0.495, + 0.554 + ], + "angle": 0, + "content": "Looking at the options, this corresponds to answer (A) 15." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.554, + 0.274, + 0.565 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.565, + 0.362, + 0.577 + ], + "angle": 0, + "content": "Solving the Grid Path Problem" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.587, + 0.813, + 0.611 + ], + "angle": 0, + "content": "In this problem, I need to find the number of ways that a bird can reach a monkey by moving only horizontally or vertically in a grid." 
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.62, + 0.305, + 0.632 + ], + "angle": 0, + "content": "Looking at the image:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.632, + 0.745, + 0.644 + ], + "angle": 0, + "content": "- The bird is located at the top of the grid in the second column (position (0,1) if we start counting from 0)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.644, + 0.524, + 0.655 + ], + "angle": 0, + "content": "- The monkey is located at the bottom-right area (position (4,3))" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.632, + 0.745, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.665, + 0.408, + 0.676 + ], + "angle": 0, + "content": "Step 1: Determine the required moves" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.677, + 0.471, + 0.687 + ], + "angle": 0, + "content": "From the bird to the monkey, the bird needs to move:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.688, + 0.383, + 0.699 + ], + "angle": 0, + "content": "- 4 steps down (from row 0 to row 4)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.415, + 0.71 + ], + "angle": 0, + "content": "- 2 steps right (from column 1 to column 3)" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.677, + 0.471, + 0.71 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.72, + 0.551, + 0.732 + ], + "angle": 0, + "content": "So the bird must make a total of 6 moves: 4 vertical and 2 horizontal." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.742, + 0.451, + 0.754 + ], + "angle": 0, + "content": "## Step 2: Calculate the number of possible paths" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.754, + 0.698, + 0.766 + ], + "angle": 0, + "content": "This is a classic combinatorial problem. The number of different ways to arrange these moves is:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.776, + 0.473, + 0.788 + ], + "angle": 0, + "content": "C(total moves, horizontal moves) = C(6,2) = 6! / (2! \\(\\times\\) 4!)" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.776, + 0.473, + 0.819 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} = (6 \\times 5) / (2 \\times 1) \\\\ = 3 0 / 2 \\\\ = 1 5 \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.832, + 0.549, + 0.844 + ], + "angle": 0, + "content": "Alternatively, we could calculate \\( \\mathrm{C}(6,4) \\) which gives the same result." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.854, + 0.4, + 0.866 + ], + "angle": 0, + "content": "## Step 3: Verify with the given options" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.866, + 0.388, + 0.877 + ], + "angle": 0, + "content": "Our answer of 15 matches option (A)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.887, + 0.247, + 0.898 + ], + "angle": 0, + "content": "Answer: A" + }, + { + "type": "image_caption", + "bbox": [ + 0.219, + 0.913, + 0.779, + 0.93 + ], + "angle": 0, + "content": "Figure 19: A sample correct case of Algorithmic Reasoning (difficulty: Hard)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.427, + 0.113, + 0.564, + 0.13 + ], + "angle": 0, + "content": "Analogical (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.137, + 0.244, + 0.168 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.169, + 0.243, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.138, + 0.286, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.169, + 0.285, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.138, + 0.327, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.174, + 0.305, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.174, + 0.322, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.204, + 0.218, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.202, + 0.244, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.202, + 0.285, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.202, + 0.304, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.202, + 0.35, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.368, + 0.153, + 0.795, + 0.19 + ], + "angle": 0, + "content": "Question: Given the pattern in the first line of blocks, which option at the bottom of the image fits in the question mark in the second line of blocks?" + }, + { + "type": "text", + "bbox": [ + 0.369, + 0.198, + 0.481, + 0.212 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.235, + 0.724, + 0.246 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the first line of blocks and see how it relates to the second line of blocks." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.252, + 0.26, + 0.26 + ], + "angle": 0, + "content": "First line of blocks:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.261, + 0.408, + 0.268 + ], + "angle": 0, + "content": "1. The first block has white on the left and blue on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.269, + 0.402, + 0.277 + ], + "angle": 0, + "content": "2. The second block has white on the top and blue on the" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.277, + 0.411, + 0.285 + ], + "angle": 0, + "content": "3. The third block has blue on the left and white on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.292, + 0.27, + 0.299 + ], + "angle": 0, + "content": "Second line of blocks:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.3, + 0.425, + 0.307 + ], + "angle": 0, + "content": "1. The first block has black on the top and yellow on the bottom" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.308, + 0.427, + 0.316 + ], + "angle": 0, + "content": "2. 
The second block has yellow on the left and black on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.316, + 0.462, + 0.323 + ], + "angle": 0, + "content": "3. The third block has a question mark (this is what we need to figure out)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.3, + 0.462, + 0.323 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.33, + 0.482, + 0.339 + ], + "angle": 0, + "content": "Let me think about the possible relationships between the first and second line:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.346, + 0.317, + 0.353 + ], + "angle": 0, + "content": "Possibility 1: Color transformation" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.355, + 0.389, + 0.362 + ], + "angle": 0, + "content": "- Blue in the first line becomes black in the second line" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.363, + 0.401, + 0.37 + ], + "angle": 0, + "content": "- White in the first line becomes yellow in the second line" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.355, + 0.401, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.377, + 0.299, + 0.386 + ], + "angle": 0, + "content": "Possibility 2: Position pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.386, + 0.499, + 0.394 + ], + "angle": 0, + "content": "The first line blocks follow a pattern of right/bottom/left coloring for the blue part." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.394, + 0.52, + 0.401 + ], + "angle": 0, + "content": "The second line blocks might follow the same positional pattern but with different colors." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.401, + 0.811, + 0.417 + ], + "angle": 0, + "content": "Based on the color transformation pattern I identified (blue→black, white→yellow), and looking at the third block in the first line (blue left, white right), the answer should be a block with black on the left and yellow on the right." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.424, + 0.275, + 0.432 + ], + "angle": 0, + "content": "Looking at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.433, + 0.49, + 0.441 + ], + "angle": 0, + "content": "- Option A: Black on top, yellow on bottom (like the first block in the second row)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.441, + 0.468, + 0.448 + ], + "angle": 0, + "content": "- Option B: White on left, pink/red on right (doesn't match our color scheme)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.449, + 0.538, + 0.456 + ], + "angle": 0, + "content": "- Option C: Yellow on top, black on bottom (this would be inverse of first block in second row)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.456, + 0.505, + 0.463 + ], + "angle": 0, + "content": "- Option D: Blue on bottom, white on top (similar to the second block in the first row)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.433, + 0.538, + 0.463 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.47, + 0.777, + 0.48 + ], + "angle": 0, + "content": "None of the options exactly match what I'm looking for. The relationship between the first line and second line might be more complex. 
Let me look at it again:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.486, + 0.414, + 0.495 + ], + "angle": 0, + "content": "First block: white left, blue right \\(\\rightarrow\\) black top, yellow bottom" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.495, + 0.423, + 0.503 + ], + "angle": 0, + "content": "Second block: white top, blue bottom \\(\\rightarrow\\) yellow left, black right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.509, + 0.301, + 0.517 + ], + "angle": 0, + "content": "Let me try to formulate a rule:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.518, + 0.296, + 0.525 + ], + "angle": 0, + "content": "-White areas become yellow" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.526, + 0.284, + 0.532 + ], + "angle": 0, + "content": "- Blue areas become black" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.533, + 0.445, + 0.541 + ], + "angle": 0, + "content": "- Position changes: left→top, right→bottom, top→left, bottom→right" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.509, + 0.445, + 0.541 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.548, + 0.752, + 0.557 + ], + "angle": 0, + "content": "So if the third block has blue left and white right, then the third block in the second row should have black top and yellow bottom according to this rule." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.564, + 0.302, + 0.573 + ], + "angle": 0, + "content": "Option A matches this pattern." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.58, + 0.269, + 0.587 + ], + "angle": 0, + "content": "Let me double-check:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.588, + 0.358, + 0.595 + ], + "angle": 0, + "content": "First rule pattern: white→yellow, blue→black" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.595, + 0.466, + 0.604 + ], + "angle": 0, + "content": "Position transformation: left \\(\\rightarrow\\) top, right \\(\\rightarrow\\) bottom, top \\(\\rightarrow\\) left, bottom \\(\\rightarrow\\) right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.611, + 0.366, + 0.618 + ], + "angle": 0, + "content": "For block 3 in row 1: blue on left, white on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.619, + 0.5, + 0.626 + ], + "angle": 0, + "content": "Applying the rules: blue left becomes black top, white right becomes yellow bottom" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.627, + 0.335, + 0.634 + ], + "angle": 0, + "content": "This gives us: black top, yellow bottom" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.634, + 0.519, + 0.642 + ], + "angle": 0, + "content": "Option A has black on top and yellow on the bottom, so it seems to be the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.643, + 0.247, + 0.65 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.651, + 0.624, + 0.658 + ], + "angle": 0, + "content": "To solve this problem, I need to identify the pattern in the first line of blocks and how it transforms in the second line." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.665, + 0.321, + 0.673 + ], + "angle": 0, + "content": "Let me analyze both rows carefully:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.681, + 0.224, + 0.688 + ], + "angle": 0, + "content": "First row:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.689, + 0.336, + 0.696 + ], + "angle": 0, + "content": "- Left block: White on left, blue on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.697, + 0.357, + 0.704 + ], + "angle": 0, + "content": "- Middle block: White on top, blue on bottom" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.705, + 0.34, + 0.712 + ], + "angle": 0, + "content": "-Right block:Blue on left, white on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.72, + 0.235, + 0.726 + ], + "angle": 0, + "content": "Second row:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.728, + 0.352, + 0.736 + ], + "angle": 0, + "content": "- Left block: Black on top, yellow on bottom" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.737, + 0.354, + 0.744 + ], + "angle": 0, + "content": "- Middle block: Yellow on left, black on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.744, + 0.354, + 0.751 + ], + "angle": 0, + "content": "- Right block: ? (what we need to determine)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.728, + 0.354, + 0.751 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.759, + 0.394, + 0.766 + ], + "angle": 0, + "content": "Looking at the transformation from first to second row:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.767, + 0.458, + 0.774 + ], + "angle": 0, + "content": "1. The blue areas in the first row seem to become black in the second row" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.775, + 0.469, + 0.782 + ], + "angle": 0, + "content": "2. The white areas in the first row seem to become yellow in the second row" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.783, + 0.346, + 0.79 + ], + "angle": 0, + "content": "3. There's also a positional transformation:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.791, + 0.291, + 0.798 + ], + "angle": 0, + "content": "- Left side \\(\\rightarrow\\) Top position" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.767, + 0.469, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.799, + 0.31, + 0.805 + ], + "angle": 0, + "content": "-Right side \\(\\rightarrow\\) Bottom position" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.806, + 0.289, + 0.813 + ], + "angle": 0, + "content": "-Top position \\(\\rightarrow\\) Left side" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.814, + 0.311, + 0.82 + ], + "angle": 0, + "content": "- Bottom position \\(\\rightarrow\\) Right side" + }, + { + "type": "list", + "bbox": [ + 0.194, + 0.799, + 0.311, + 0.82 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.821, + 0.521, + 0.829 + ], + "angle": 0, + "content": "Option A shows black on top and yellow on bottom, which matches our expected pattern." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.836, + 0.228, + 0.844 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.85, + 0.379, + 0.862 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.862, + 0.812, + 0.898 + ], + "angle": 0, + "content": "Error Reason: The model gets this question wrong because it focuses too much on individual color mappings or partial position patterns rather than identifying the full transformation. The correct pattern involves a consistent clockwise rotation of color positions from the first to the second row." + }, + { + "type": "image_caption", + "bbox": [ + 0.233, + 0.913, + 0.763, + 0.93 + ], + "angle": 0, + "content": "Figure 20: A sample error case of Analogical Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.427, + 0.113, + 0.564, + 0.129 + ], + "angle": 0, + "content": "Analogical (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.139, + 0.439, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.201, + 0.408, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.246, + 0.293, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.243, + 0.384, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.344, + 0.29, + 0.351, + 0.297 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.311, + 0.288, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.25, + 0.345, + 0.258, + 0.352 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.311, + 0.407, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.358, + 0.345, + 0.366, + 0.352 + ], + "angle": 0, + "content": "D" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.214, + 0.792, + 0.24 + ], + "angle": 0, + "content": "Question: Given the analogy in the first line of items, which choice fits best in the black question mark?" + }, + { + "type": "text", + "bbox": [ + 0.457, + 0.248, + 0.569, + 0.262 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.358, + 0.588, + 0.37 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let's analyze the analogy in the first line:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.378, + 0.624, + 0.39 + ], + "angle": 0, + "content": "The first line shows sliced bread on the left and a knife on the right, connected by an arrow." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.398, + 0.813, + 0.419 + ], + "angle": 0, + "content": "The relationship here appears to be: the knife is a tool used to cut/slice the bread. The knife is what transforms a whole loaf of bread into slices." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.428, + 0.348, + 0.439 + ], + "angle": 0, + "content": "Now, let's look at the second line:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.439, + 0.422, + 0.449 + ], + "angle": 0, + "content": "- On the left, we see a log (a section of tree trunk)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.449, + 0.609, + 0.46 + ], + "angle": 0, + "content": "- On the right, we need to determine which tool would be used to process or cut this log" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.469, + 0.316, + 0.479 + ], + "angle": 0, + "content": "Let's examine each option:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.479, + 0.367, + 0.489 + ], + "angle": 0, + "content": "(A) A wooden shed or small building" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.489, + 0.336, + 0.498 + ], + "angle": 0, + "content": "(B) A cleaver or butcher's knife" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.499, + 0.241, + 0.508 + ], + "angle": 0, + "content": "(C) An axe" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.509, + 0.362, + 0.519 + ], + "angle": 0, + "content": "(D) Sliced butter on a wooden board" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.479, + 0.367, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.528, + 0.712, + 0.54 + ], + "angle": 0, + "content": "The question is asking which tool would be used to process a log, similar to how a knife is used to slice bread." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.548, + 0.644, + 0.559 + ], + "angle": 0, + "content": "- Option A (wooden shed) is not a tool but a structure that might be built using processed wood" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.559, + 0.581, + 0.569 + ], + "angle": 0, + "content": "- Option B (cleaver/butcher's knife) is a cutting tool but not typically used for logs" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.569, + 0.517, + 0.579 + ], + "angle": 0, + "content": "- Option C (axe) is a tool specifically designed for cutting logs/wood" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.579, + 0.489, + 0.59 + ], + "angle": 0, + "content": "- Option D (sliced butter) is not a tool at all, but a food product" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.548, + 0.644, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.598, + 0.811, + 0.62 + ], + "angle": 0, + "content": "Following the analogy \"bread \\(\\rightarrow\\) knife\" (something that needs to be cut \\(\\rightarrow\\) tool used to cut it), the logical completion for \"log \\(\\rightarrow\\) ?\" would be an axe, which is a tool specifically designed to cut wood." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.628, + 0.416, + 0.639 + ], + "angle": 0, + "content": "Therefore, the answer appears to be (C) the axe." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.639, + 0.266, + 0.649 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.649, + 0.811, + 0.669 + ], + "angle": 0, + "content": "I need to determine the relationship between the items in the first line and then apply the same relationship to find the missing item in the second line." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.679, + 0.304, + 0.689 + ], + "angle": 0, + "content": "Looking at the first line:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.689, + 0.487, + 0.698 + ], + "angle": 0, + "content": "- On the left, there's a loaf of bread with some slices cut from it" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.326, + 0.708 + ], + "angle": 0, + "content": "- On the right, there's a knife" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.709, + 0.529, + 0.719 + ], + "angle": 0, + "content": "- The relationship is that the knife is the tool used to cut/slice the bread" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.689, + 0.529, + 0.719 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.728, + 0.309, + 0.738 + ], + "angle": 0, + "content": "Now, for the second line:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.739, + 0.421, + 0.749 + ], + "angle": 0, + "content": "- On the left, there's a log (a section of tree trunk)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.749, + 0.635, + 0.76 + ], + "angle": 0, + "content": "- On the right, I need to find the appropriate tool that would be used to cut or process this log" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.739, + 0.635, + 0.76 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.768, + 0.324, + 0.779 + ], + "angle": 0, + "content": "Let me analyze each option:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.779, + 0.777, + 0.789 + ], + "angle": 0, + "content": "(A) A wooden shed/small building - This is a structure that might be built using processed wood, not a tool for cutting logs" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.789, + 0.606, + 0.799 + ], + "angle": 0, + "content": "(B) A cleaver/butcher's knife - While this is a cutting tool, it's not typically used for logs" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.799, + 0.589, + 0.809 + ], + "angle": 0, + "content": "(C) An axe - This is a tool specifically designed for chopping and cutting logs/wood" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.809, + 0.577, + 0.82 + ], + "angle": 0, + "content": "(D) Sliced butter on a board - This is a food product, not a tool for processing logs" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.779, + 0.777, + 0.82 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.828, + 0.459, + 0.839 + ], + "angle": 0, + "content": "The pattern is \"material \\(\\rightarrow\\) tool used to cut that material\"" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.839, + 0.266, + 0.848 + ], + "angle": 0, + "content": "-Bread \\(\\rightarrow\\) Knife" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.848, + 0.235, + 0.859 + ], + "angle": 0, + "content": "- \\(\\mathrm{Log}\\rightarrow ?\\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.839, + 0.266, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.868, + 0.651, + 0.88 + ], + "angle": 0, + "content": "An axe is the tool specifically designed for cutting logs, just as a knife is the tool for slicing bread." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.888, + 0.241, + 0.898 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "image_caption", + "bbox": [ + 0.226, + 0.913, + 0.771, + 0.93 + ], + "angle": 0, + "content": "Figure 21: A sample correct case of Analogical Reasoning (difficulty: Easy)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.411, + 0.112, + 0.579, + 0.129 + ], + "angle": 0, + "content": "Analogical (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.593, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.134, + 0.242, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.14, + 0.324, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.325, + 0.161, + 0.335, + 0.168 + ], + "angle": 0, + "content": ":" + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.14, + 0.409, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.136, + 0.474, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.309, + 0.202, + 0.353, + 0.209 + ], + "angle": 0, + "content": "Candidates" + }, + { + "type": "image", + "bbox": [ + 0.202, + 0.211, + 0.241, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.267, + 0.211, + 0.321, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.211, + 0.405, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.211, + 0.468, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.491, + 0.165, + 0.791, + 0.203 + ], + "angle": 0, + "content": "Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy." + }, + { + "type": "text", + "bbox": [ + 0.493, + 0.21, + 0.605, + 0.224 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.263, + 0.813, + 0.281 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: I need to analyze the relationship in the first set, and then identify which candidate option (A, B, C, or D) best completes the right side of the equation when paired with \"Origin'\"."
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.287, + 0.282, + 0.296 + ], + "angle": 0, + "content": "Let's analyze the images:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.303, + 0.223, + 0.31 + ], + "angle": 0, + "content": "Left side:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.311, + 0.525, + 0.319 + ], + "angle": 0, + "content": "- Origin: A person installing or working on what appears to be a large appliance or system" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.32, + 0.586, + 0.328 + ], + "angle": 0, + "content": "- Analogy: A person installing or repairing what looks like a garage door opener or ceiling-mounted device" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.334, + 0.23, + 0.342 + ], + "angle": 0, + "content": "Right side:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.342, + 0.632, + 0.35 + ], + "angle": 0, + "content": "- Origin: A person's hand operating what appears to be a sound mixing board or control panel with sliders and buttons" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.35, + 0.458, + 0.358 + ], + "angle": 0, + "content": "- Analogy': This is what we need to determine from options A, B, C, or D" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.365, + 0.406, + 0.373 + ], + "angle": 0, + "content": "The relationship between Origin and Analogy seems to be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.373, + 0.811, + 0.39 + ], + "angle": 0, + "content": "Both involve installation, repair, or maintenance of mechanical/electrical systems, but in different contexts. They show people actively working on or operating different types of equipment or machinery." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.396, + 0.691, + 0.405 + ], + "angle": 0, + "content": "Now, let's look at the candidate options and see which one best represents the same relationship with Origin' (the sound mixing board):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.412, + 0.376, + 0.42 + ], + "angle": 0, + "content": "A: A person painting or working on a ceiling/wall" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.42, + 0.532, + 0.428 + ], + "angle": 0, + "content": "B: A close-up of someone working on or repairing what appears to be electronic components" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.429, + 0.4, + 0.437 + ], + "angle": 0, + "content": "C: Some mechanical or electronic equipment with wiring" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.437, + 0.583, + 0.445 + ], + "angle": 0, + "content": "D: People moving or installing what looks like a home appliance (possibly a refrigerator or washer/dryer)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.451, + 0.747, + 0.46 + ], + "angle": 0, + "content": "I need to determine which option shows a relationship to the sound mixing board (Origin') that parallels the relationship between the first two images." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.466, + 0.811, + 0.483 + ], + "angle": 0, + "content": "If Origin shows someone installing/repairing an appliance, and Analogy shows someone installing/repairing a different type of equipment (garage door opener), then the relationship is about installation/repair of different types of systems." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.489, + 0.8, + 0.499 + ], + "angle": 0, + "content": "Similarly, if Origin' shows someone operating a sound mixing board, then Analogy' should show someone operating a different type of equipment or control system." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.505, + 0.275, + 0.513 + ], + "angle": 0, + "content": "Looking at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.513, + 0.531, + 0.522 + ], + "angle": 0, + "content": "A: Shows installation/repair work (similar to Origin/ Analogy) but not operation of controls" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.523, + 0.651, + 0.531 + ], + "angle": 0, + "content": "B: Shows repair/maintenance of electronic components" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.531, + 0.402, + 0.538 + ], + "angle": 0, + "content": "C: Shows equipment but not clearly someone operating it." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.538, + 0.371, + 0.545 + ], + "angle": 0, + "content": "D: Shows people moving/installing an appliance" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.552, + 0.81, + 0.56 + ], + "angle": 0, + "content": "The key relationship between Origin and Analogy seems to be: a person actively working on installing or maintaining a system. Similarly, Origin' shows a person" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.56, + 0.375, + 0.569 + ], + "angle": 0, + "content": "actively operating a control system (mixing board)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.575, + 0.811, + 0.592 + ], + "angle": 0, + "content": "Option B seems closest as it shows hands working on electronic components, which could be considered operating or maintaining another type of electronic system. Though it's not entirely clear from the image, this appears to be the closest match to the relationship pattern." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.592, + 0.247, + 0.599 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.6, + 0.262, + 0.607 + ], + "angle": 0, + "content": "Analogy Analysis" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.614, + 0.81, + 0.631 + ], + "angle": 0, + "content": "I need to understand the relationship between \"Origin\" and \"Analogy\" in the first pair, then apply that same relationship to find what completes the \"Origin\" and \"Analogy\" pair." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.637, + 0.292, + 0.645 + ], + "angle": 0, + "content": "Examining the first pair:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.645, + 0.621, + 0.654 + ], + "angle": 0, + "content": "- \\*\\*Origin\\*\\*: Shows a person installing or repairing what appears to be a large appliance (possibly a washer or dryer)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.654, + 0.627, + 0.662 + ], + "angle": 0, + "content": "- \\*\\*Analogy\\*\\*: Shows a person installing or working on a ceiling-mounted device (appears to be a garage door opener)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.668, + 0.774, + 0.678 + ], + "angle": 0, + "content": "The relationship seems to be: Both involve installation/repair/maintenance of household mechanical systems, but in different locations and of different types." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.685, + 0.303, + 0.693 + ], + "angle": 0, + "content": "Examining the second pair:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.693, + 0.585, + 0.7 + ], + "angle": 0, + "content": "- \\*\\*Origin\\*\\*: Shows hands operating a sound mixing board or audio control panel with sliders and buttons" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.7, + 0.373, + 0.708 + ], + "angle": 0, + "content": "- \\*\\*Analogy\\*\\*: This is what we need to determine" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.715, + 0.811, + 0.732 + ], + "angle": 0, + "content": "I need to find an option that shows someone working with or operating another type of electronic/technical equipment, maintaining the same relationship as the first pair." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.739, + 0.286, + 0.746 + ], + "angle": 0, + "content": "Analyzing the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.747, + 0.409, + 0.755 + ], + "angle": 0, + "content": "- \\(\\mathrm{^{**}A^{**}}\\): Person painting or installing something on a ceiling" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.755, + 0.467, + 0.763 + ], + "angle": 0, + "content": "- \\(\\mathbf{\\Pi}^{**}\\mathbf{B}^{**}\\) : Close-up of hands working on electronic components/circuit board" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.763, + 0.425, + 0.77 + ], + "angle": 0, + "content": "- \\(\\mathbf{\\ddot{C}}^{\\star \\star}\\) : Complex mechanical/electronic equipment with wiring" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.77, + 0.51, + 0.779 + ], + "angle": 0, + "content": "- \\(\\mathbf{^{**}D^{**}}\\): Two people moving or installing what appears to be a refrigerator or appliance" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.785, + 0.7, + 0.794 + ], + "angle": 0, + "content": "The relationship we're looking for should show hands working on/operating a different type of technical equipment than a mixing board." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.801, + 0.811, + 0.818 + ], + "angle": 0, + "content": "Option B shows hands working directly with electronic components/circuit board, which parallels how the hands in Origin are working directly with the audio equipment. Both involve hands-on technical work with electronic systems, just different types." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.824, + 0.228, + 0.832 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.837, + 0.379, + 0.848 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.849, + 0.811, + 0.898 + ], + "angle": 0, + "content": "Error Reason: The model gets the answer wrong because it overemphasizes the type of equipment being worked on (mechanical vs. electronic) rather than the specific action and interface involved in the analogy. Option C correctly mirrors the using / fixing relationship seen in the first pair of analogy, while Option B lacks this direct fixing interaction, despite a similar technical context." + }, + { + "type": "image_caption", + "bbox": [ + 0.22, + 0.914, + 0.778, + 0.93 + ], + "angle": 0, + "content": "Figure 22: A sample error case of Analogical Reasoning (difficulty: Medium)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.411, + 0.112, + 0.579, + 0.129 + ], + "angle": 0, + "content": "Analogical (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.145, + 0.266, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.267, + 0.143, + 0.35, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.144, + 0.455, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.455, + 0.136, + 0.541, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.222, + 0.386, + 0.23 + ], + "angle": 0, + "content": "Candidates" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.235, + 0.271, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.233, + 0.344, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.234, + 0.449, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.235, + 0.539, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.549, + 0.178, + 0.812, + 0.229 + ], + "angle": 0, + "content": "Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy." + }, + { + "type": "text", + "bbox": [ + 0.55, + 0.238, + 0.662, + 0.251 + ], + "angle": 0, + "content": "Ground Truth: (B)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.303, + 0.81, + 0.328 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the relationship between the \"Origin\" and \"Analogy\" images to determine the pattern, and then apply that to find the best match for \"Origin\" from the candidates." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.337, + 0.272, + 0.349 + ], + "angle": 0, + "content": "In the first pair:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.349, + 0.613, + 0.36 + ], + "angle": 0, + "content": "- Origin: A dog (looks like a spaniel or similar breed) searching/sniffing in grass" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.36, + 0.729, + 0.371 + ], + "angle": 0, + "content": "- Analogy: What appears to be a small animal (possibly a rodent) in its natural habitat with vegetation" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.349, + 0.729, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.381, + 0.288, + 0.393 + ], + "angle": 0, + "content": "In the second pair:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.393, + 0.647, + 0.404 + ], + "angle": 0, + "content": "- Origin': A dog (appears to be a small white dog) eating or drinking from a bowl/dish" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.404, + 0.389, + 0.415 + ], + "angle": 0, + "content": "- Analogy': We need to determine this" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.393, + 0.647, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.425, + 0.81, + 0.45 + ], + "angle": 0, + "content": "The relationship seems to be showing an animal (dog) in the Origin image, then showing what might be a prey animal or similar smaller animal in a related natural context in the Analogy image." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.458, + 0.778, + 0.471 + ], + "angle": 0, + "content": "So, for Origin' which shows a dog eating/ drinking, the Analogy' should show some smaller animal also eating." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.481, + 0.33, + 0.493 + ], + "angle": 0, + "content": "Looking at the candidates:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.493, + 0.625, + 0.504 + ], + "angle": 0, + "content": "A: Shows a dog with what appears to be a stick or something in its mouth in snow." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.505, + 0.465, + 0.515 + ], + "angle": 0, + "content": "B: Shows a mouse or small rodent eating something" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.516, + 0.562, + 0.526 + ], + "angle": 0, + "content": "C: Shows what looks like a groundhog or marmot near flowers/plants" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.527, + 0.381, + 0.538 + ], + "angle": 0, + "content": "D: Shows another dog's face/profile" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.493, + 0.625, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.548, + 0.812, + 0.582 + ], + "angle": 0, + "content": "Based on the pattern (dog \\(\\rightarrow\\) smaller animal in natural context, especially related to the activity the dog is doing), option B (the mouse/rodent eating) would best complete the analogy since it shows a smaller animal doing the same activity (eating) as the dog in Origin!" 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.582, + 0.274, + 0.593 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.593, + 0.812, + 0.616 + ], + "angle": 0, + "content": "I need to determine the relationship between the \"Origin\" and \"Analogy\" images, and then apply that same relationship to find the appropriate \"Analogy\"\" that matches \"Origin\"\"" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.626, + 0.318, + 0.638 + ], + "angle": 0, + "content": "Looking at the first pair:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.638, + 0.612, + 0.649 + ], + "angle": 0, + "content": "- Origin: A dog searching or sniffing in grass (appears to be hunting or tracking)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.649, + 0.644, + 0.66 + ], + "angle": 0, + "content": "- Analogy: A small animal (looks like a rodent) in its natural habitat among vegetation" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.638, + 0.644, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.67, + 0.359, + 0.681 + ], + "angle": 0, + "content": "Now looking at the second pair:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.682, + 0.468, + 0.693 + ], + "angle": 0, + "content": "- Origin: A dog eating or drinking from a bowl/dish" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.693, + 0.422, + 0.704 + ], + "angle": 0, + "content": "- Analogy': This is what I need to determine" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.682, + 0.468, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.714, + 0.812, + 0.738 + ], + "angle": 0, + "content": "The relationship between the first pair seems to be: a dog engaged in a natural behavior (hunting/tracking), followed by what could be its potential prey or a similar smaller animal in its natural environment." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.747, + 0.812, + 0.772 + ], + "angle": 0, + "content": "Following this pattern, the \"Analogy\" should show a smaller animal engaged in eating or drinking (similar to what the dog is doing in \"Origin\")." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.781, + 0.33, + 0.793 + ], + "angle": 0, + "content": "Examining the candidates:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.794, + 0.438, + 0.804 + ], + "angle": 0, + "content": "A: A dog with something in its mouth on snow" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.805, + 0.388, + 0.815 + ], + "angle": 0, + "content": "B: A mouse/small rodent eating food" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.816, + 0.583, + 0.827 + ], + "angle": 0, + "content": "C: What appears to be a groundhog or marmot among vegetation/flowers" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.827, + 0.361, + 0.838 + ], + "angle": 0, + "content": "D: A profile view of another dog" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.794, + 0.583, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.848, + 0.812, + 0.872 + ], + "angle": 0, + "content": "Since the pattern is showing a smaller animal doing a similar activity as the dog, option B (the mouse eating) best completes the analogy because it shows a smaller animal engaged in the same activity (eating) as the dog in Origin!" 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.88, + 0.245, + 0.892 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "image_caption", + "bbox": [ + 0.212, + 0.913, + 0.785, + 0.93 + ], + "angle": 0, + "content": "Figure 23: A sample correct case of Analogical Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.424, + 0.112, + 0.566, + 0.129 + ], + "angle": 0, + "content": "Analogical (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.149, + 0.408, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.413, + 0.141, + 0.813, + 0.216 + ], + "angle": 0, + "content": "Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures." + }, + { + "type": "title", + "bbox": [ + 0.415, + 0.225, + 0.528, + 0.239 + ], + "angle": 0, + "content": "Ground Truth: (A)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.248, + 0.812, + 0.266 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze this question carefully. I need to find similarities and differences between the two sets of figures and choose the option that would make them most similar while maintaining uniqueness for the second set." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.272, + 0.313, + 0.28 + ], + "angle": 0, + "content": "The first set of figures consists of:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.281, + 0.59, + 0.289 + ], + "angle": 0, + "content": "1. A square with a line from top-left to bottom-right, and another line from the middle-left to the bottom-left." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.289, + 0.297, + 0.296 + ], + "angle": 0, + "content": "2. A square with zigzag lines." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.296, + 0.55, + 0.304 + ], + "angle": 0, + "content": "3. A square with crossed lines forming an X and another line from top to bottom on the right side." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.281, + 0.59, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.311, + 0.324, + 0.318 + ], + "angle": 0, + "content": "The second set of figures consists of:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.32, + 0.552, + 0.327 + ], + "angle": 0, + "content": "1. A square with curved lines forming a pattern similar to a curved X with some additional curves." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.328, + 0.367, + 0.335 + ], + "angle": 0, + "content": "2. A square with a circle and curved lines inside." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.335, + 0.393, + 0.343 + ], + "angle": 0, + "content": "3. The question mark - this is what I need to determine." 
+ }, + { + "type": "list", + "bbox": [ + 0.187, + 0.32, + 0.552, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.35, + 0.464, + 0.358 + ], + "angle": 0, + "content": "Now let's look at the possible options for the third figure in the second set:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.36, + 0.422, + 0.366 + ], + "angle": 0, + "content": "A. A square with curved semi-circles on the left and right sides" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.367, + 0.381, + 0.373 + ], + "angle": 0, + "content": "B. A square with leaf-like shapes in the four corners." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.374, + 0.483, + 0.382 + ], + "angle": 0, + "content": "C. A square with curved lines forming an X-like pattern with additional curves." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.383, + 0.402, + 0.39 + ], + "angle": 0, + "content": "D. A square with star-like pattern created by curved lines." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.35, + 0.483, + 0.39 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.396, + 0.505, + 0.405 + ], + "angle": 0, + "content": "To find the pattern, I need to look for similarities between the first set and second set:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.412, + 0.333, + 0.419 + ], + "angle": 0, + "content": "Comparing the first figures of each set:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.421, + 0.322, + 0.428 + ], + "angle": 0, + "content": "- First set: Square with straight lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.429, + 0.329, + 0.436 + ], + "angle": 0, + "content": "- Second set: Square with curved lines" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.421, + 0.329, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.443, + 0.345, + 0.451 + ], + "angle": 0, + "content": "Comparing the second figures of each set:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.453, + 0.348, + 0.46 + ], + "angle": 0, + "content": "- First set: Square with zigzag straight lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.46, + 0.373, + 0.467 + ], + "angle": 0, + "content": "- Second set: Square with a circle and curved lines" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.443, + 0.373, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.474, + 0.811, + 0.491 + ], + "angle": 0, + "content": "I notice that the second set seems to be using curved lines where the first set uses straight lines. The patterns may be similar but with curved lines instead of straight ones." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.497, + 0.811, + 0.515 + ], + "angle": 0, + "content": "Looking at the third figure in the first set, it has crossed lines forming an X with an additional line. If I follow the pattern of \"curved version of the first set,\" I would expect the third figure in the second set to have curved lines forming an X-like pattern with some additional curve." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.474, + 0.811, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.52, + 0.672, + 0.53 + ], + "angle": 0, + "content": "Looking at the options, option C seems to match this pattern - it has curved lines forming an X-like pattern with additional curves." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.537, + 0.438, + 0.546 + ], + "angle": 0, + "content": "However, I should also check if there's another pattern I'm missing:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.552, + 0.811, + 0.57 + ], + "angle": 0, + "content": "Another possibility is that there's a progression across each row. In the first set, we see increasing complexity or number of lines. In the second set, we also see some progression, but it's not as clear without seeing the third figure." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.575, + 0.811, + 0.591 + ], + "angle": 0, + "content": "Given the information and options available, option C seems to be the most logical choice as it represents a curved version of the X pattern seen in the third figure of the first set." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.591, + 0.249, + 0.599 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.599, + 0.61, + 0.608 + ], + "angle": 0, + "content": "I need to analyze both sets of figures to find patterns and determine which option would complete the second set." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.591, + 0.61, + 0.608 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.614, + 0.341, + 0.624 + ], + "angle": 0, + "content": "First, let me examine both sets of figures:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.63, + 0.275, + 0.638 + ], + "angle": 0, + "content": "**First Set (Top Row):**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.639, + 0.424, + 0.646 + ], + "angle": 0, + "content": "1. Square with straight lines forming an angle in the bottom left" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.647, + 0.323, + 0.654 + ], + "angle": 0, + "content": "2. Square with zigzag diagonal lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.654, + 0.459, + 0.662 + ], + "angle": 0, + "content": "3. Square with straight lines forming an X plus a vertical line on the right" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.639, + 0.459, + 0.662 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.669, + 0.298, + 0.677 + ], + "angle": 0, + "content": "**Second Set (Middle Row):**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.677, + 0.43, + 0.685 + ], + "angle": 0, + "content": "1. Square with curved lines forming what looks like a distorted X" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.685, + 0.334, + 0.692 + ], + "angle": 0, + "content": "2. Square with a circle and curved lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.692, + 0.323, + 0.7 + ], + "angle": 0, + "content": "3. Question mark - to be determined" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.677, + 0.43, + 0.7 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.707, + 0.678, + 0.717 + ], + "angle": 0, + "content": "**Key Observation:** The second set appears to use curved lines to create patterns similar to the straight-line patterns in the first set." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.723, + 0.292, + 0.731 + ], + "angle": 0, + "content": "Looking at the relationship:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.731, + 0.482, + 0.739 + ], + "angle": 0, + "content": "- First figure in set 1 has straight angles; first figure in set 2 has curved versions" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.74, + 0.526, + 0.747 + ], + "angle": 0, + "content": "- Second figure in set 1 has zigzag lines; second figure in set 2 has curved lines with a circle" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.723, + 0.526, + 0.747 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.754, + 0.716, + 0.764 + ], + "angle": 0, + "content": "For the third position, I should look for a figure that uses curved lines to represent the X-pattern plus vertical line from the third figure in set 1." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.77, + 0.275, + 0.778 + ], + "angle": 0, + "content": "Looking at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.778, + 0.42, + 0.786 + ], + "angle": 0, + "content": "- Option A: Curved lines forming semi-circles on left and right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.786, + 0.411, + 0.794 + ], + "angle": 0, + "content": "- Option B: Curved lines forming leaf-like shapes in corners" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.794, + 0.462, + 0.802 + ], + "angle": 0, + "content": "- Option C: Curved lines forming an X-like pattern with additional curves" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.802, + 0.381, + 0.81 + ], + "angle": 0, + "content": "- Option D: Curved lines forming a star-like pattern" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.778, + 0.462, + 0.81 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.816, + 0.811, + 0.834 + ], + "angle": 0, + "content": "Option C seems most appropriate as it shows curved lines forming an X-like pattern, similar to how the third figure in the first set has straight lines forming an X. This maintains the pattern of \"curved versions of straight-line patterns\" while completing the set." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.84, + 0.23, + 0.848 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.852, + 0.38, + 0.865 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.865, + 0.811, + 0.9 + ], + "angle": 0, + "content": "Error Reason: The model incorrectly chooses (C) because it overly focuses on surface-level visual similarity (the X-like shape) without accounting for the systematic transformation from the first row of blocks to the second row of blocks." + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.913, + 0.766, + 0.93 + ], + "angle": 0, + "content": "Figure 24: A sample error case of Analogical Reasoning (difficulty: Hard)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.424, + 0.113, + 0.566, + 0.129 + ], + "angle": 0, + "content": "Analogical (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.14, + 0.405, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.174, + 0.364, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.238, + 0.206, + 0.35, + 0.213 + ], + "angle": 0, + "content": "A B C D" + }, + { + "type": "text", + "bbox": [ + 0.412, + 0.135, + 0.813, + 0.21 + ], + "angle": 0, + "content": "Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures." + }, + { + "type": "title", + "bbox": [ + 0.415, + 0.212, + 0.526, + 0.226 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.231, + 0.489, + 0.241 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me carefully analyze this problem." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.247, + 0.293, + 0.256 + ], + "angle": 0, + "content": "We have two sets of figures:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.256, + 0.31, + 0.263 + ], + "angle": 0, + "content": "- First set: Three figures in a row" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.263, + 0.36, + 0.271 + ], + "angle": 0, + "content": "- Second set: Two figures and a question mark" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.247, + 0.36, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.277, + 0.81, + 0.287 + ], + "angle": 0, + "content": "I need to find the best option (A, B, C, or D) to replace the question mark, such that the two sets become most similar while preserving the uniqueness of the second set." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.294, + 0.31, + 0.302 + ], + "angle": 0, + "content": "First, let me analyze the first set:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.302, + 0.584, + 0.309 + ], + "angle": 0, + "content": "1. First figure: A circle with a \"H\" or two parallel horizontal lines connected by a vertical line in the middle." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.309, + 0.474, + 0.317 + ], + "angle": 0, + "content": "2. Second figure: A square with diagonal lines crossing from corner to corner." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.317, + 0.302, + 0.325 + ], + "angle": 0, + "content": "3. Third figure: A simple circle." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.302, + 0.584, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.332, + 0.422, + 0.34 + ], + "angle": 0, + "content": "Now, let me analyze the second set (before the question mark):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.341, + 0.604, + 0.348 + ], + "angle": 0, + "content": "1. 
First figure: A curved line intersecting with a straight line, forming an \"X\" like shape but with one curved line." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.349, + 0.56, + 0.357 + ], + "angle": 0, + "content": "2. Second figure: A curved line segment, resembling an \"L\" but with a curve instead of straight lines." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.357, + 0.385, + 0.365 + ], + "angle": 0, + "content": "3. Question mark: This is what we need to figure out." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.341, + 0.604, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.371, + 0.28, + 0.38 + ], + "angle": 0, + "content": "Let's look at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.38, + 0.422, + 0.388 + ], + "angle": 0, + "content": "A: Two crossing lines with a small curve at one end of one line." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.388, + 0.341, + 0.395 + ], + "angle": 0, + "content": "B: Two curved lines crossing in an \"X\" for" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.395, + 0.49, + 0.403 + ], + "angle": 0, + "content": "C: A circle with two perpendicular lines crossing it, forming four equal segments." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.403, + 0.45, + 0.411 + ], + "angle": 0, + "content": "D: A circular pattern with curved lines, creating a swirl or spiral effect." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.38, + 0.49, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.418, + 0.468, + 0.427 + ], + "angle": 0, + "content": "Now, let me try to identify the pattern or relationship between the two sets:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.433, + 0.663, + 0.443 + ], + "angle": 0, + "content": "In the first set, we start with a circle containing a shape (H), then move to a square with crossed lines, and finally a simple circle." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.449, + 0.811, + 0.466 + ], + "angle": 0, + "content": "In the second set, we start with crossed lines (one curved), then a curved line segment. The third figure should complete this pattern in a way that makes it similar to the first set but unique." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.472, + 0.404, + 0.481 + ], + "angle": 0, + "content": "Looking at the transformation from first set to second set:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.481, + 0.474, + 0.489 + ], + "angle": 0, + "content": "1. The first figure in both sets involves lines (H-shape vs X-shape with curve)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.489, + 0.549, + 0.497 + ], + "angle": 0, + "content": "2. The second figure in both sets involves diagonal/angled elements (crossed square vs curved L)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.497, + 0.372, + 0.505 + ], + "angle": 0, + "content": "3. The third figure in the first set is a simple circle" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.481, + 0.549, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.511, + 0.464, + 0.521 + ], + "angle": 0, + "content": "So what would make the third figure in the second set similar yet unique?" 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.526, + 0.811, + 0.544 + ], + "angle": 0, + "content": "Option C stands out because it's a circle (like the third figure in the first set) but with internal lines dividing it (maintaining the \"line\" theme of the second set). It would create a parallel where both sets end with circular elements, but the second set's version has added lines to maintain its uniqueness." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.549, + 0.798, + 0.56 + ], + "angle": 0, + "content": "Let me double-check by considering another angle. Maybe there's a transformation rule from each figure in the first set to the corresponding figure in the second set:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.566, + 0.54, + 0.575 + ], + "angle": 0, + "content": "1. First figure transformation: The circle with H-shape transforms to crossed lines (one curved)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.576, + 0.523, + 0.583 + ], + "angle": 0, + "content": "2. Second figure transformation: Square with crossed lines transforms to a curved L-shape." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.583, + 0.701, + 0.59 + ], + "angle": 0, + "content": "3. Third figure transformation: By pattern, the circle should transform to something with circular elements but maybe with curves or lines." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.566, + 0.701, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.596, + 0.669, + 0.606 + ], + "angle": 0, + "content": "Looking at options again, C and D both have circular elements. C has straight lines dividing the circle while D has a swirl pattern." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.612, + 0.781, + 0.622 + ], + "angle": 0, + "content": "Option C seems most consistent with the pattern of the second set (which involves lines) while maintaining similarity to the third figure of the first set (a circle)." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.628, + 0.336, + 0.636 + ], + "angle": 0, + "content": "So I believe C is the most likely answer." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.637, + 0.249, + 0.644 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.644, + 0.51, + 0.653 + ], + "angle": 0, + "content": "I'll solve this step by step by analyzing the two sets of figures and looking for patterns." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.659, + 0.272, + 0.667 + ], + "angle": 0, + "content": "**First Set of Figures:**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.667, + 0.482, + 0.675 + ], + "angle": 0, + "content": "1. A circle with a \"H\" shape inside (horizontal lines connected by a vertical line)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.675, + 0.334, + 0.683 + ], + "angle": 0, + "content": "2. A square with diagonal crossed lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.683, + 0.251, + 0.691 + ], + "angle": 0, + "content": "3. A simple circle" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.667, + 0.482, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.283, + 0.706 + ], + "angle": 0, + "content": "**Second Set of Figures:**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.706, + 0.383, + 0.714 + ], + "angle": 0, + "content": "1. An intersection of a straight line and a curved line" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.714, + 0.316, + 0.722 + ], + "angle": 0, + "content": "2. 
A curved line in an L-like shape" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.722, + 0.347, + 0.73 + ], + "angle": 0, + "content": "3. Question mark - needs to be determined" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.706, + 0.383, + 0.73 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.737, + 0.29, + 0.744 + ], + "angle": 0, + "content": "**Looking for the Pattern:**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.744, + 0.566, + 0.753 + ], + "angle": 0, + "content": "I need to understand how these sets relate to each other to find the right option for the question mark." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.76, + 0.713, + 0.769 + ], + "angle": 0, + "content": "The first figures in each set involve lines: one has organized parallel lines in a circle, the other has intersecting lines (one curved, one straight)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.769, + 0.663, + 0.777 + ], + "angle": 0, + "content": "The second figures in each set involve lines at angles: one has crossed diagonal lines in a square, the other has a curved L-shape." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.777, + 0.811, + 0.793 + ], + "angle": 0, + "content": "For the third figure, the first set has a simple circle. Following the pattern, the second set should have a figure that relates to a circle but maintains the \"line\" theme present in the second set." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.799, + 0.289, + 0.807 + ], + "angle": 0, + "content": "**Analyzing the Options:**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.807, + 0.335, + 0.815 + ], + "angle": 0, + "content": "A: Two crossed lines with a curved end" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.815, + 0.352, + 0.823 + ], + "angle": 0, + "content": "B: Two curved lines crossing in an X pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.823, + 0.421, + 0.831 + ], + "angle": 0, + "content": "C: A circle divided into four equal parts by perpendicular lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.831, + 0.322, + 0.839 + ], + "angle": 0, + "content": "D: A swirl pattern with curved lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.847, + 0.236, + 0.854 + ], + "angle": 0, + "content": "**Decision:**" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.854, + 0.339, + 0.862 + ], + "angle": 0, + "content": "Option C makes the most sense because:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.862, + 0.421, + 0.87 + ], + "angle": 0, + "content": "1. It contains a circle (relating to the third figure in the first set)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.87, + 0.457, + 0.878 + ], + "angle": 0, + "content": "2. It includes straight lines (maintaining the line theme of the second set)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.878, + 0.681, + 0.886 + ], + "angle": 0, + "content": "3. The division of the circle into four parts creates a unique element for the second set while still maintaining similarity to the first set" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.892, + 0.228, + 0.9 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.76, + 0.811, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.913, + 0.773, + 0.93 + ], + "angle": 0, + "content": "Figure 25: A sample correct case of Analogical Reasoning (difficulty: Hard)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.428, + 0.113, + 0.562, + 0.13 + ], + "angle": 0, + "content": "Deductive (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.186, + 0.135, + 0.321, + 0.24 + ], + "angle": 0, + "content": "
GOBDE
NPKAJ
OZGFQ
WVIYN
IQTH
" + }, + { + "type": "text", + "bbox": [ + 0.334, + 0.154, + 0.813, + 0.191 + ], + "angle": 0, + "content": "Question: Bruce ciphers words adopting the board demonstrated. For example, the word DVEJ is ciphered as GT WI GH NH. What word did Bruce cipher NQ OH OT WI?" + }, + { + "type": "text", + "bbox": [ + 0.336, + 0.192, + 0.627, + 0.205 + ], + "angle": 0, + "content": "Options: A: XOCR. B: DJWS. C: KQFV. D: KFQV" + }, + { + "type": "text", + "bbox": [ + 0.336, + 0.211, + 0.449, + 0.225 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.248, + 0.813, + 0.267 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: I see a 5x5 grid of letters that appears to be the board used for Bruce's cipher. Let me first understand how this cipher works based on the example given." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.271, + 0.46, + 0.282 + ], + "angle": 0, + "content": "The example states that the word \"DVEJ\" is ciphered as \"GT WI GH NH\"." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.287, + 0.29, + 0.296 + ], + "angle": 0, + "content": "Let me analyze the pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.296, + 0.267, + 0.306 + ], + "angle": 0, + "content": "D is ciphered as GT" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.307, + 0.268, + 0.317 + ], + "angle": 0, + "content": "V is ciphered as W1 E is ciphered as GH" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.317, + 0.267, + 0.328 + ], + "angle": 0, + "content": "- 1 is ciphered as GH\n- j is ciphered as NH" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.296, + 0.268, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.334, + 0.525, + 0.344 + ], + "angle": 0, + "content": "I need to figure out the rule that transforms each letter into two letters based on this board." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.349, + 0.464, + 0.359 + ], + "angle": 0, + "content": "Looking at the board, I can see that the first column contains G, N, O, W, I." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.36, + 0.319, + 0.366 + ], + "angle": 0, + "content": "The first row contains G,O,B,D,E" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.366, + 0.388, + 0.375 + ], + "angle": 0, + "content": "The left column and top row are highlighted in green." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.375, + 0.341, + 0.383 + ], + "angle": 0, + "content": "The bottom row is highlighted in yellow." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.389, + 0.359, + 0.398 + ], + "angle": 0, + "content": "Let me try to understand how D becomes GT:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.398, + 0.395, + 0.406 + ], + "angle": 0, + "content": "- Looking at the board, D is in the first row, 4th column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.406, + 0.352, + 0.414 + ], + "angle": 0, + "content": "-G is the letter in the first row, first column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.414, + 0.364, + 0.422 + ], + "angle": 0, + "content": "- T is not directly visible in the board as shown." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.349, + 0.464, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.427, + 0.582, + 0.438 + ], + "angle": 0, + "content": "Let me try a different approach. Maybe the cipher works by identifying the row and column of each letter." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.443, + 0.248, + 0.452 + ], + "angle": 0, + "content": "For the letter D:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.452, + 0.314, + 0.46 + ], + "angle": 0, + "content": "- D is in the first row, 4th column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.46, + 0.343, + 0.468 + ], + "angle": 0, + "content": "- The letter at the beginning of row 1 is G." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.468, + 0.511, + 0.476 + ], + "angle": 0, + "content": "- The letter at the beginning of column 4 might be T (though I can't see it in this board)." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.452, + 0.511, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.482, + 0.246, + 0.491 + ], + "angle": 0, + "content": "For the letter V:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.491, + 0.356, + 0.499 + ], + "angle": 0, + "content": "- V appears to be in the 4th row, 2nd column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.499, + 0.344, + 0.507 + ], + "angle": 0, + "content": "- The letter at the beginning of row 4 is W." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.491, + 0.356, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.507, + 0.486, + 0.515 + ], + "angle": 0, + "content": "- The letter at the beginning of column 2 might be I (though I can't see it clearly)." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.52, + 0.81, + 0.539 + ], + "angle": 0, + "content": "Looking more closely at the board, I notice that there's no column heading visible, but maybe the bottom row (I, Q, T, H) might be the column identifiers. This seems plausible." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.544, + 0.311, + 0.553 + ], + "angle": 0, + "content": "Let me check if this makes sense:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.553, + 0.477, + 0.562 + ], + "angle": 0, + "content": "- D is in row 1, column \\(4 \\rightarrow G\\) (row 1 identifier) + T (column 4 identifier) = GT" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.562, + 0.458, + 0.57 + ], + "angle": 0, + "content": "- V is in row 4, column \\(2 \\rightarrow W\\) (row 4 identifier) + Q (column 2 identifier)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.553, + 0.477, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.575, + 0.453, + 0.585 + ], + "angle": 0, + "content": "But this doesn't match the example where V is ciphered as WI, not WQ." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.59, + 0.508, + 0.6 + ], + "angle": 0, + "content": "Maybe the column identifiers are different or the rule is different. Let me re-examine..." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.606, + 0.781, + 0.617 + ], + "angle": 0, + "content": "Actually, looking at the board again, I see that the column headers are likely the first row (G, O, B, D, E) and the row headers are the first column (G, N, O, W, I)." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.622, + 0.201, + 0.63 + ], + "angle": 0, + "content": "So:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.63, + 0.464, + 0.638 + ], + "angle": 0, + "content": "- D is at position (1, 4), so its coordinates would be GD (row G, column D)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.638, + 0.341, + 0.647 + ], + "angle": 0, + "content": "- V isn't clearly visible in the grid shown." 
+ }, + { + "type": "list", + "bbox": [ + 0.187, + 0.63, + 0.464, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.652, + 0.691, + 0.663 + ], + "angle": 0, + "content": "Let me try yet another approach. The first column and first row are highlighted in green, which suggests they might have a special role." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.669, + 0.255, + 0.677 + ], + "angle": 0, + "content": "Maybe the rule is:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.677, + 0.434, + 0.685 + ], + "angle": 0, + "content": "- For each letter in the original word, find its position in the board." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.685, + 0.652, + 0.694 + ], + "angle": 0, + "content": "- The ciphered result is the corresponding letter from the first column followed by the corresponding letter from the first row." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.677, + 0.652, + 0.694 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.699, + 0.276, + 0.708 + ], + "angle": 0, + "content": "Let me check this for D:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.708, + 0.757, + 0.717 + ], + "angle": 0, + "content": "- If D is at position (r, c) in the board, the ciphered result would be the letter in the first column at row r, followed by the letter in the first row at column c." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.722, + 0.444, + 0.733 + ], + "angle": 0, + "content": "But I still don't see how D becomes GT based on the board provided." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.738, + 0.497, + 0.747 + ], + "angle": 0, + "content": "Wait, maybe I need to interpret the board differently. Looking at the board, I notice:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.747, + 0.316, + 0.755 + ], + "angle": 0, + "content": "- G is in the first row, first column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.755, + 0.492, + 0.764 + ], + "angle": 0, + "content": "- T might be in the bottom row, 3rd column (though it's labeled as T in the image)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.764, + 0.316, + 0.772 + ], + "angle": 0, + "content": "- W is in the 4th row, first column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.772, + 0.436, + 0.779 + ], + "angle": 0, + "content": "- I is in the 3rd row, 3rd column (or in the bottom row, 1st column)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.779, + 0.316, + 0.787 + ], + "angle": 0, + "content": "- G is in the first row, first column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.787, + 0.325, + 0.794 + ], + "angle": 0, + "content": "-H is in the bottom row, 4th column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.794, + 0.316, + 0.802 + ], + "angle": 0, + "content": "- N is in the 2nd row, first column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.802, + 0.325, + 0.81 + ], + "angle": 0, + "content": "- H is in the bottom row, 4th column." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.747, + 0.436, + 0.81 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.816, + 0.314, + 0.825 + ], + "angle": 0, + "content": "I'm still not seeing a clear pattern." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.825, + 0.25, + 0.834 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.838, + 0.383, + 0.851 + ], + "angle": 0, + "content": "Error Category: Reject to Answer" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.851, + 0.813, + 0.9 + ], + "angle": 0, + "content": "Error Reason: The model rejects to answer since it fails to settle on a single consistent ciphering rule or final answer. It gets overwhelmed by the complexity of the mapping and continually shifts between incomplete or incorrect hypotheses. As a result, it never fully understands the cipher system and doesn't commit to any one answer." + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.913, + 0.763, + 0.93 + ], + "angle": 0, + "content": "Figure 26: A sample error case of Deductive Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.428, + 0.112, + 0.562, + 0.13 + ], + "angle": 0, + "content": "Deductive (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.14, + 0.511, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.548, + 0.157, + 0.801, + 0.23 + ], + "angle": 0, + "content": "Question: From the above food web diagram, what would happen to dragonfly if all mayfly die? Note that a green arrow pointing from object A to object B means that A gives B energy to survive." + }, + { + "type": "text", + "bbox": [ + 0.549, + 0.231, + 0.796, + 0.255 + ], + "angle": 0, + "content": "Options: A: remains the same. B: increase. C: decrease. D: NA" + }, + { + "type": "text", + "bbox": [ + 0.55, + 0.265, + 0.663, + 0.279 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.299, + 0.81, + 0.326 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the food web diagram to understand the relationships between different organisms."
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.336, + 0.288, + 0.348 + ], + "angle": 0, + "content": "In this food web:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.348, + 0.799, + 0.361 + ], + "angle": 0, + "content": "- Green arrows point from an organism that gives energy to another organism (food source \\(\\rightarrow\\) consumer)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.361, + 0.695, + 0.374 + ], + "angle": 0, + "content": "- I need to examine what would happen to dragonfly nymphs if all mayfly nymphs die" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.348, + 0.799, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.385, + 0.331, + 0.398 + ], + "angle": 0, + "content": "Looking at the diagram:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.398, + 0.606, + 0.41 + ], + "angle": 0, + "content": "- Mayfly nymphs feed on pondweed (arrow from pondweed to mayfly)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.41, + 0.649, + 0.422 + ], + "angle": 0, + "content": "- Dragonfly nymphs feed on mayfly nymphs (arrow from mayfly to dragonfly)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.422, + 0.756, + 0.435 + ], + "angle": 0, + "content": "- Dragonfly nymphs also feed on freshwater shrimp (arrow from freshwater shrimp to dragonfly)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.435, + 0.66, + 0.448 + ], + "angle": 0, + "content": "- Brown trout feeds on dragonfly nymphs (arrow from dragonfly to brown trout)" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.398, + 0.756, + 0.448 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.458, + 0.336, + 0.471 + ], + "angle": 0, + "content": "If all mayfly nymphs die:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.471, + 0.627, + 0.484 + ], + "angle": 0, + "content": "- Dragonfly nymphs would lose one of their food sources (mayfly nymphs)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.484, + 0.613, + 0.495 + ], + "angle": 0, + "content": "- Dragonfly nymphs would still have freshwater shrimp as a food source" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.495, + 0.704, + 0.509 + ], + "angle": 0, + "content": "- But losing mayfly nymphs would reduce the total available food for dragonfly nymphs" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.471, + 0.704, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.519, + 0.811, + 0.556 + ], + "angle": 0, + "content": "Therefore, the population of dragonfly nymphs would likely decrease because one of their food sources is gone, although they would not go extinct since they have another food source. This makes the correct answer (C) decrease." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.557, + 0.282, + 0.569 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.569, + 0.525, + 0.581 + ], + "angle": 0, + "content": "Analysis of Food Web and Effect on Dragonfly Nymphs" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.592, + 0.811, + 0.619 + ], + "angle": 0, + "content": "Looking at the food web diagram, I need to determine what would happen to dragonfly nymphs if all mayfly nymphs died out." 
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.629, + 0.376, + 0.642 + ], + "angle": 0, + "content": "Examining the Relationships" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.642, + 0.808, + 0.667 + ], + "angle": 0, + "content": "I can see that green arrows point from one organism to another, indicating energy transfer (food source \\(\\rightarrow\\) consumer)." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.678, + 0.381, + 0.69 + ], + "angle": 0, + "content": "From the diagram, I can observe:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.691, + 0.403, + 0.703 + ], + "angle": 0, + "content": "- Mayfly nymphs feed on pondweed" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.703, + 0.45, + 0.716 + ], + "angle": 0, + "content": "- Dragonfly nymphs feed on mayfly nymphs" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.716, + 0.491, + 0.728 + ], + "angle": 0, + "content": "- Dragonfly nymphs also feed on freshwater shrimp" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.729, + 0.432, + 0.741 + ], + "angle": 0, + "content": "- Brown trout feeds on dragonfly nymphs" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.691, + 0.491, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.752, + 0.303, + 0.765 + ], + "angle": 0, + "content": "## Impact Analysis" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.765, + 0.345, + 0.777 + ], + "angle": 0, + "content": "If all mayfly nymphs died:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.777, + 0.578, + 0.789 + ], + "angle": 0, + "content": "1. Dragonfly nymphs would lose one of their primary food sources" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.789, + 0.691, + 0.8 + ], + "angle": 0, + "content": "2. Dragonfly nymphs would still have freshwater shrimp as an alternative food source" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.801, + 0.77, + 0.814 + ], + "angle": 0, + "content": "3. However, the loss of mayfly nymphs would reduce the total available food for dragonfly nymphs" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.777, + 0.77, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.825, + 0.275, + 0.836 + ], + "angle": 0, + "content": "Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.837, + 0.811, + 0.876 + ], + "angle": 0, + "content": "Since dragonfly nymphs would lose a significant food source (mayfly nymphs), their population would likely decline, even though they have another food source. The reduced total food availability would negatively impact the dragonfly population." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.886, + 0.252, + 0.898 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.913, + 0.769, + 0.93 + ], + "angle": 0, + "content": "Figure 27: A sample correct case of Deductive Reasoning (difficulty: Easy)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.413, + 0.112, + 0.577, + 0.129 + ], + "angle": 0, + "content": "Deductive (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.149, + 0.365, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.37, + 0.141, + 0.813, + 0.202 + ], + "angle": 0, + "content": "Question: In a tournament each of the 6 teams plays one match against every other team. In each round of matches, 3 take place simultaneously. A TV station has already decided which match it will broadcast for each round, as shown in the diagram. In which round will team D play against team F?" + }, + { + "type": "text", + "bbox": [ + 0.371, + 0.203, + 0.545, + 0.216 + ], + "angle": 0, + "content": "Options: A: 1. B: 2. C: 3. D: 4" + }, + { + "type": "title", + "bbox": [ + 0.371, + 0.223, + 0.484, + 0.237 + ], + "angle": 0, + "content": "Ground Truth: (A)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.246, + 0.813, + 0.265 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: This question is asking about a tournament where each team plays one match against every other team. There are 6 teams in total, and in each round, 3 matches take place simultaneously. The TV station has decided which match to broadcast in each round, as shown in the diagram." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.27, + 0.336, + 0.28 + ], + "angle": 0, + "content": "Let's analyze what the diagram tells us:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.286, + 0.355, + 0.294 + ], + "angle": 0, + "content": "- Round 1: The match being broadcast is A-B" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.294, + 0.354, + 0.302 + ], + "angle": 0, + "content": "- Round 2: The match being broadcast is C-D" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.302, + 0.354, + 0.309 + ], + "angle": 0, + "content": "- Round 3: The match being broadcast is A-E" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.309, + 0.354, + 0.317 + ], + "angle": 0, + "content": "- Round 4: The match being broadcast is E-F" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.317, + 0.355, + 0.325 + ], + "angle": 0, + "content": "- Round 5: The match being broadcast is A-C" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.286, + 0.355, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.332, + 0.451, + 0.342 + ], + "angle": 0, + "content": "We need to determine in which round team D will play against team F." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.347, + 0.812, + 0.366 + ], + "angle": 0, + "content": "First, let's think about how many rounds there will be in total. With 6 teams, each team needs to play against 5 other teams. So there are a total of \\((6 \\times 5) / 2 = 15\\) matches. In each round, 3 matches take place, so there will be \\(15 / 3 = 5\\) rounds in total." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.371, + 0.812, + 0.389 + ], + "angle": 0, + "content": "The diagram shows the match being broadcast in each round, but it doesn't tell us what the other matches in each round are. 
To figure this out, I need to work out the full schedule of matches." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.394, + 0.772, + 0.412 + ], + "angle": 0, + "content": "In round 1, A plays against B (as shown in the diagram). So teams C, D, E, and F must form the other two matches in round 1. Let's say these are C-D and E-F. In round 2, C plays against D (as shown in the diagram). So teams A, B, E, and F must form the other two matches in round 2. Let's say these are A-E and B-F." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.417, + 0.635, + 0.428 + ], + "angle": 0, + "content": "But wait, this doesn't work because we've already paired up A-B, C-D, and E-F in round 1. Let's try a different approach." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.433, + 0.813, + 0.451 + ], + "angle": 0, + "content": "Let's try to systematically figure out the schedule. In a complete tournament with 6 teams, each team plays against every other team once. So each team plays 5 matches. There are a total of \\((6 \\times 5) / 2 = 15\\) matches. With 3 matches per round, we need \\(15 / 3 = 5\\) rounds, as I calculated earlier." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.456, + 0.471, + 0.466 + ], + "angle": 0, + "content": "I'll use the constraints from the diagram to help determine the full schedule." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.471, + 0.662, + 0.482 + ], + "angle": 0, + "content": "Round 1: A-B is one of the matches. The other two matches must use teams C, D, E, and F. Let's try to determine what those are." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.488, + 0.522, + 0.497 + ], + "angle": 0, + "content": "Round 2: C-D is one of the matches. The other two matches must use teams A, B, E, and F." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.503, + 0.522, + 0.512 + ], + "angle": 0, + "content": "Round 3: A-E is one of the matches. The other two matches must use teams B, C, D, and F." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.518, + 0.522, + 0.528 + ], + "angle": 0, + "content": "Round 4: E-F is one of the matches. The other two matches must use teams A, B, C, and D." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.534, + 0.522, + 0.544 + ], + "angle": 0, + "content": "Round 5: A-C is one of the matches. The other two matches must use teams B, D, E, and F." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.55, + 0.404, + 0.56 + ], + "angle": 0, + "content": "I'll try filling in a table of which teams play in each round." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.566, + 0.272, + 0.575 + ], + "angle": 0, + "content": "Round 1: A-B, C-? E-?" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.575, + 0.272, + 0.583 + ], + "angle": 0, + "content": "Round 2: C-D, A-? , ?-" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.583, + 0.269, + 0.59 + ], + "angle": 0, + "content": "Round 3: A-E, ?-? , ?-?" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.59, + 0.269, + 0.598 + ], + "angle": 0, + "content": "Round 4: E-F, ?-? , ?-?" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.598, + 0.269, + 0.605 + ], + "angle": 0, + "content": "Round 5: A-C, ?-? , ?-?" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.612, + 0.706, + 0.622 + ], + "angle": 0, + "content": "Actually, the question is specifically about when team D plays against team F. So, I really just need to find out which round that happens in." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.628, + 0.459, + 0.636 + ], + "angle": 0, + "content": "From the diagram, we know which matches are broadcast in each round:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.637, + 0.245, + 0.643 + ], + "angle": 0, + "content": "- Round 1: A-B" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.643, + 0.245, + 0.65 + ], + "angle": 0, + "content": "- Round 2: C-D" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.65, + 0.245, + 0.657 + ], + "angle": 0, + "content": "- Round 3: A-E" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.657, + 0.245, + 0.665 + ], + "angle": 0, + "content": "- Round 4: E-F" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.665, + 0.245, + 0.672 + ], + "angle": 0, + "content": "- Round 5: A-C" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.637, + 0.245, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.682, + 0.307, + 0.69 + ], + "angle": 0, + "content": "So the matches involving D are:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.691, + 0.297, + 0.699 + ], + "angle": 0, + "content": "- Round 2: D plays against C." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.434, + 0.707 + ], + "angle": 0, + "content": "- Other rounds: D plays against A, B, E, and F (one in each round)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.714, + 0.297, + 0.722 + ], + "angle": 0, + "content": "The matches involving F are:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.722, + 0.434, + 0.739 + ], + "angle": 0, + "content": "- Round 4: F plays against E. \n- Other rounds: F plays against A, B, C, and D (one in each round)." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.744, + 0.81, + 0.762 + ], + "angle": 0, + "content": "Now, in which round do D and F play against each other? It can't be round 2, because D plays against C in that round. It can't be round 4, because F plays against E in that round. So D and F play against each other in either round 1, 3, or 5." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.768, + 0.595, + 0.777 + ], + "angle": 0, + "content": "Let's check round 1. We know A plays against B. D and F could potentially play against each other in round 1." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.777, + 0.81, + 0.792 + ], + "angle": 0, + "content": "Let's check round 3. We know A plays against E. The other teams (B, C, D, and F) form the other two matches. If D plays against F in this round, then B must play against C." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.792, + 0.81, + 0.808 + ], + "angle": 0, + "content": "Let's check round 5. We know A plays against C. The other teams (B, D, E, and F) form the other two matches. If D plays against F in this round, then B must play against E." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.814, + 0.422, + 0.822 + ], + "angle": 0, + "content": "Hmm, I'm having trouble determining when D plays against F." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.822, + 0.249, + 0.831 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.837, + 0.383, + 0.849 + ], + "angle": 0, + "content": "Error Category: Reject to Answer" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.849, + 0.812, + 0.898 + ], + "angle": 0, + "content": "Error Reason: The model rejects to answer since it fails to complete the full tournament schedule or isolate the specific round where D plays F.
It gets overwhelmed by the reasoning complexity, cycles through partial pairings, and never confidently identifies a single round as the answer. As a result, it ends with indecision and no final response." + }, + { + "type": "image_caption", + "bbox": [ + 0.22, + 0.913, + 0.776, + 0.93 + ], + "angle": 0, + "content": "Figure 28: A sample error case of Deductive Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.413, + 0.112, + 0.577, + 0.129 + ], + "angle": 0, + "content": "Deductive (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.136, + 0.318, + 0.242 + ], + "angle": 0, + "content": "
LWBVMQ
ZPUKGJ
ADHLRZ
GSXEOY
KICAFT
MFICS
" + }, + { + "type": "text", + "bbox": [ + 0.327, + 0.154, + 0.798, + 0.191 + ], + "angle": 0, + "content": "Question: Jeff represents words applying the board demonstrated. For example, the word DCXEH is represented as AM KF GF GI AF. What word did Jeff represent KS KF LC LF GM?" + }, + { + "type": "text", + "bbox": [ + 0.329, + 0.192, + 0.658, + 0.205 + ], + "angle": 0, + "content": "Options: A: TSMBC. B: TCMBS. C: PEQGT. D: FHMTK" + }, + { + "type": "title", + "bbox": [ + 0.329, + 0.215, + 0.442, + 0.229 + ], + "angle": 0, + "content": "Ground Truth: (B)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.244, + 0.813, + 0.263 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: I'm given a grid or board that represents a mapping from letters to two-letter codes. Let's examine how the mapping works by looking at the example." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.268, + 0.39, + 0.278 + ], + "angle": 0, + "content": "The word DCXEH is represented as AM KF GF GI AF." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.285, + 0.3, + 0.293 + ], + "angle": 0, + "content": "Let me determine the pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.293, + 0.282, + 0.299 + ], + "angle": 0, + "content": "- D is represented as AM" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.299, + 0.277, + 0.308 + ], + "angle": 0, + "content": "- C is represented as KF" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.308, + 0.302, + 0.315 + ], + "angle": 0, + "content": "- X is represented as GF" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.315, + 0.302, + 0.322 + ], + "angle": 0, + "content": "E is represented as GI" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.322, + 0.278, + 0.331 + ], + "angle": 0, + "content": "- H is represented as AF" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.293, + 0.302, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.338, + 0.733, + 0.348 + ], + "angle": 0, + "content": "Looking at the image, I see a grid with rows labeled with L, Z, A, G, K and columns labeled with W, B, V, M, Q, and a bottom row with M, F, I, C, S." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.354, + 0.692, + 0.364 + ], + "angle": 0, + "content": "To understand how this board works for encoding, I need to figure out how each letter in the original word maps to the two-letter code." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.37, + 0.646, + 0.379 + ], + "angle": 0, + "content": "For D, it maps to AM. D appears in the first column of the row labeled A. So A (row) + M (column bottom row) = AM for D." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.379, + 0.641, + 0.387 + ], + "angle": 0, + "content": "For C, it maps to KF. C appears in the third column of the row labeled K. So K (row) + F (column bottom row) = KF for C." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.387, + 0.639, + 0.395 + ], + "angle": 0, + "content": "For X, it maps to GF. X appears in the third column of the row labeled G. So G (row) + F (column bottom row) = GF for X." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.395, + 0.639, + 0.402 + ], + "angle": 0, + "content": "For E, it maps to GI. E appears in the fourth column of the row labeled G. So G (row) + I (column bottom row) = GI for E." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.402, + 0.641, + 0.41 + ], + "angle": 0, + "content": "For H, it maps to AF. H appears in the third column of the row labeled A. 
So A (row) + F (column bottom row) = AF for H." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.417, + 0.287, + 0.425 + ], + "angle": 0, + "content": "So the pattern seems to be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.425, + 0.298, + 0.432 + ], + "angle": 0, + "content": "1. Locate the letter in the grid" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.432, + 0.352, + 0.439 + ], + "angle": 0, + "content": "2. The first letter of the code is the row label" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.44, + 0.578, + 0.448 + ], + "angle": 0, + "content": "3. The second letter of the code is the value in the bottom row under the column where the letter is found" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.456, + 0.322, + 0.464 + ], + "angle": 0, + "content": "Now let's decode KS KF LC LF GM:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.471, + 0.202, + 0.479 + ], + "angle": 0, + "content": "KS:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.479, + 0.499, + 0.487 + ], + "angle": 0, + "content": "Row K, column with bottom row value S. This refers to column 5, which contains T." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.495, + 0.202, + 0.502 + ], + "angle": 0, + "content": "KF:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.502, + 0.5, + 0.511 + ], + "angle": 0, + "content": "Row K, column with bottom row value F. This refers to column 2, which contains C." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.518, + 0.202, + 0.525 + ], + "angle": 0, + "content": "LC:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.525, + 0.502, + 0.534 + ], + "angle": 0, + "content": "Row L, column with bottom row value C. This refers to column 4, which contains M." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.542, + 0.2, + 0.548 + ], + "angle": 0, + "content": "LF:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.548, + 0.498, + 0.556 + ], + "angle": 0, + "content": "Row L, column with bottom row value F. This refers to column 2, which contains B." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.564, + 0.202, + 0.57 + ], + "angle": 0, + "content": "GM" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.572, + 0.502, + 0.581 + ], + "angle": 0, + "content": "Row G, column with bottom row value M. This refers to column 1, which contains S." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.588, + 0.308, + 0.595 + ], + "angle": 0, + "content": "So the decoded word is TCMBS." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.596, + 0.249, + 0.604 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.604, + 0.327, + 0.612 + ], + "angle": 0, + "content": "# Decoding the Word Representation" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.619, + 0.595, + 0.628 + ], + "angle": 0, + "content": "I need to figure out how Jeff's encoding system works based on the example, then decode the given sequence." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.635, + 0.338, + 0.644 + ], + "angle": 0, + "content": "## Understanding the Encoding System" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.651, + 0.247, + 0.659 + ], + "angle": 0, + "content": "In the example:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.659, + 0.355, + 0.666 + ], + "angle": 0, + "content": "- DCXEH is represented as AM KF GF GI AF" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.674, + 0.482, + 0.683 + ], + "angle": 0, + "content": "Looking at the grid provided in the image, I can see this is a coordinate system:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.683, + 0.364, + 0.69 + ], + "angle": 0, + "content": "- The rows are labeled with letters L, Z, A, G, K" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.69, + 0.404, + 0.697 + ], + "angle": 0, + "content": "- The columns have a bottom row with values M, F, I, C, S" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.705, + 0.328, + 0.712 + ], + "angle": 0, + "content": "The encoding seems to work like this:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.712, + 0.43, + 0.721 + ], + "angle": 0, + "content": "1. For each letter in the original word, find its position in the grid" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.721, + 0.347, + 0.728 + ], + "angle": 0, + "content": "2. The first part of the code is the row label" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.728, + 0.436, + 0.736 + ], + "angle": 0, + "content": "3. The second part is the value from the bottom row of that column" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.744, + 0.307, + 0.752 + ], + "angle": 0, + "content": "Let me verify with the example:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.752, + 0.37, + 0.76 + ], + "angle": 0, + "content": "- D is in row A, column with M at bottom \\(\\rightarrow\\) AM" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.76, + 0.361, + 0.767 + ], + "angle": 0, + "content": "- C is in row K, column with F at bottom \\(\\rightarrow\\) KF" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.767, + 0.361, + 0.775 + ], + "angle": 0, + "content": "- X is in row G, column with F at bottom \\(\\rightarrow\\) GF" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.775, + 0.36, + 0.782 + ], + "angle": 0, + "content": "- E is in row G, column with I at bottom \\(\\rightarrow\\) GI" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.782, + 0.362, + 0.79 + ], + "angle": 0, + "content": "- H is in row A, column with F at bottom \\(\\rightarrow\\) AF."
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.798, + 0.313, + 0.807 + ], + "angle": 0, + "content": "## Decoding the Given Sequence" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.814, + 0.321, + 0.822 + ], + "angle": 0, + "content": "Now let's decode KS KF LC LF GM:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.829, + 0.394, + 0.836 + ], + "angle": 0, + "content": "- KS: Row K, column with S at bottom \\(\\rightarrow\\) The letter is T" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.837, + 0.394, + 0.845 + ], + "angle": 0, + "content": "- KF: Row K, column with F at bottom \\(\\rightarrow\\) The letter is C" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.845, + 0.397, + 0.853 + ], + "angle": 0, + "content": "- LC: Row L, column with C at bottom \\(\\rightarrow\\) The letter is M" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.853, + 0.391, + 0.861 + ], + "angle": 0, + "content": "- LF: Row \\( \\mathrm{L}_r \\), column with F at bottom → The letter is B" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.861, + 0.4, + 0.868 + ], + "angle": 0, + "content": "- GM: Row G, column with M at bottom \\(\\rightarrow\\) The letter is S" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.876, + 0.369, + 0.884 + ], + "angle": 0, + "content": "This gives us TCMBS, which matches option (B)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.891, + 0.228, + 0.899 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "image_caption", + "bbox": [ + 0.213, + 0.913, + 0.783, + 0.93 + ], + "angle": 0, + "content": "Figure 29: A sample correct case of Deductive Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.426, + 0.113, + 0.564, + 0.129 + ], + "angle": 0, + "content": "Deductive (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.135, + 0.437, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.468, + 0.141, + 0.793, + 0.215 + ], + "angle": 0, + "content": "Question: Jan cannot draw very accurately but nevertheless he tried to produce a roadmap of his village. The relative position of the houses and the street crossings are all correct but three of the roads are actually straight and only Qurwik street is not. Who lives in Qurwik street?" + }, + { + "type": "text", + "bbox": [ + 0.469, + 0.215, + 0.733, + 0.228 + ], + "angle": 0, + "content": "Options: A: Amy. B: Ben. C: Carol. D: David" + }, + { + "type": "title", + "bbox": [ + 0.47, + 0.24, + 0.583, + 0.254 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.268, + 0.712, + 0.279 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: In this problem, I'm given a drawing of a village roadmap made by Jan. According to the problem:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.28, + 0.432, + 0.285 + ], + "angle": 0, + "content": "1. The relative positions of houses and street crossings are correct" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.285, + 0.517, + 0.293 + ], + "angle": 0, + "content": "2. 
Three of the roads are actually straight in reality, but only Qurwik street is not straight" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.293, + 0.449, + 0.3 + ], + "angle": 0, + "content": "3. The houses are labeled with the names: Amy, Ben, Carol, and David" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.3, + 0.373, + 0.308 + ], + "angle": 0, + "content": "4. I need to determine who lives on Qurwik street" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.28, + 0.517, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.314, + 0.658, + 0.324 + ], + "angle": 0, + "content": "Looking at the image, I can see a diagram with four houses labeled with the names and several curved roads connecting them." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.33, + 0.761, + 0.34 + ], + "angle": 0, + "content": "Since three roads are actually straight and only Qurwik street is not, I need to identify which road would most likely not be straight based on the diagram." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.346, + 0.334, + 0.354 + ], + "angle": 0, + "content": "The roads in the diagram appear to be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.355, + 0.372, + 0.362 + ], + "angle": 0, + "content": "1. A road connecting Amy's house to the network" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.363, + 0.375, + 0.37 + ], + "angle": 0, + "content": "2. A road connecting Carol's house to the network" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.37, + 0.373, + 0.377 + ], + "angle": 0, + "content": "3. A road connecting David's house to the network" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.378, + 0.368, + 0.385 + ], + "angle": 0, + "content": "4. A road connecting Ben's house to the network" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.386, + 0.388, + 0.394 + ], + "angle": 0, + "content": "5. Various segments connecting these houses together" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.355, + 0.388, + 0.394 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.401, + 0.811, + 0.417 + ], + "angle": 0, + "content": "Since we're told that only one road is not straight, and we see multiple curved segments in the diagram, it must be that these curved segments represent the same road - Qurwik street." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.423, + 0.583, + 0.433 + ], + "angle": 0, + "content": "Looking carefully at the diagram, I need to determine which person's house is connected to Qurwik street." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.439, + 0.79, + 0.449 + ], + "angle": 0, + "content": "The diagram shows curved roads connecting different houses. Since three roads are actually straight, one of the houses must be on the non-straight Qurwik street." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.455, + 0.811, + 0.472 + ], + "angle": 0, + "content": "Looking at the connectivity in the diagram, it appears that there's a curved path that runs between multiple houses. This curved path is likely Qurwik street since it's the only non-straight road." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.478, + 0.811, + 0.495 + ], + "angle": 0, + "content": "Based on the diagram, it looks like Ben's house is at one end of what appears to be a major curved road. David's house seems to be connected to this same curved road. Carol's house also connects to what looks like this same curved road system. 
Amy's house seems to be connected to part of this curved network as well." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.501, + 0.811, + 0.518 + ], + "angle": 0, + "content": "This is a bit ambiguous from the diagram alone, as I can't clearly tell which specific segment is Qurwik street versus other roads that might be straight in reality despite being drawn curved." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.525, + 0.811, + 0.542 + ], + "angle": 0, + "content": "Without more specific information about which road is Qurwik street, I would have to make an inference based on which house seems most connected to curved paths in the diagram." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.549, + 0.421, + 0.556 + ], + "angle": 0, + "content": "Looking at the houses and their positions relative to the roads:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.557, + 0.43, + 0.564 + ], + "angle": 0, + "content": "- Amy's house appears to be connected to a curved road segment" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.565, + 0.425, + 0.573 + ], + "angle": 0, + "content": "- Ben's house appears to be at the end of a curved road segment" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.574, + 0.374, + 0.581 + ], + "angle": 0, + "content": "- Carol's house connects to a curved road segment" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.581, + 0.389, + 0.588 + ], + "angle": 0, + "content": "- David's house connects to multiple curved segments" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.557, + 0.43, + 0.588 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.595, + 0.811, + 0.612 + ], + "angle": 0, + "content": "Based on the diagram, David's house seems to be most central to the curved road network, with multiple curved segments connecting to it. This suggests David might be on Qurwik street." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.618, + 0.583, + 0.626 + ], + "angle": 0, + "content": "However, this is somewhat ambiguous without more clear labeling of which road is which in the diagram." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.627, + 0.248, + 0.634 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.635, + 0.338, + 0.643 + ], + "angle": 0, + "content": "Solving the Village Roadmap Problem" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.649, + 0.668, + 0.659 + ], + "angle": 0, + "content": "I need to determine who lives on Qurwik street, given that three roads are actually straight and only Qurwik street is not straight." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.665, + 0.808, + 0.682 + ], + "angle": 0, + "content": "Looking at the image, I can see houses belonging to Amy, Ben, Carol, and David connected by various curved roads. Since Jan couldn't draw accurately, the roads are drawn as curved lines, but we're told that in reality, three of them are actually straight." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.688, + 0.311, + 0.698 + ], + "angle": 0, + "content": "Analyzing the Road Network" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.705, + 0.282, + 0.712 + ], + "angle": 0, + "content": "In the diagram, I can see:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.713, + 0.39, + 0.72 + ], + "angle": 0, + "content": "- Four houses connected by a network of curved roads" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.721, + 0.353, + 0.727 + ], + "angle": 0, + "content": "- Multiple intersections between these roads" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.728, + 0.356, + 0.735 + ], + "angle": 0, + "content": "- Each house is connected to at least one road" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.743, + 0.298, + 0.752 + ], + "angle": 0, + "content": "Identifying Qurwik Street" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.758, + 0.616, + 0.768 + ], + "angle": 0, + "content": "To determine which road is Qurwik street (the only non-straight road), I need to analyze the pattern of connections." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.774, + 0.811, + 0.799 + ], + "angle": 0, + "content": "Looking at the diagram carefully, I notice that David's house appears to be connected to a road that makes a distinctive curved path. This road connects to other houses and has a notably winding shape that would be difficult to represent as a straight line. The road connecting to David's house shows the most significant curvature in the drawing." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.805, + 0.811, + 0.822 + ], + "angle": 0, + "content": "Since the problem states that three roads are actually straight and only Qurwik street is not, and David's house connects to what appears to be the most significantly curved road in the diagram, it's reasonable to conclude that David lives on Qurwik street." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.828, + 0.23, + 0.837 + ], + "angle": 0, + "content": "Answer: D" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.847, + 0.407, + 0.859 + ], + "angle": 0, + "content": "Error Category: Understanding Error" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.86, + 0.811, + 0.895 + ], + "angle": 0, + "content": "Error Reason: The model misinterprets the condition that only one road (Qurwik street) is not straight, while the other three are. It fails to recognize that the roads that appear curved in the drawing are not all actually curved." + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.913, + 0.764, + 0.93 + ], + "angle": 0, + "content": "Figure 30: A sample error case of Deductive Reasoning (difficulty: Hard)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.426, + 0.112, + 0.564, + 0.129 + ], + "angle": 0, + "content": "Deductive (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.142, + 0.38, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.387, + 0.144, + 0.803, + 0.229 + ], + "angle": 0, + "content": "Question: Diana draws a rectangle made up of twelve squares onto a piece of squared paper. Some of the squares are coloured in black.
She writes the number of adjacent black squares into every white square. The diagram shows an example of such a rectangle. Now she does the same with a rectangle made up of 2018 squares. What is the biggest number that she can obtain as the sum of all numbers in the white squares?" + }, + { + "type": "text", + "bbox": [ + 0.389, + 0.23, + 0.641, + 0.242 + ], + "angle": 0, + "content": "Options: A: 2800. B: 2950. C: 3025. D: 3100" + }, + { + "type": "text", + "bbox": [ + 0.39, + 0.25, + 0.502, + 0.264 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.275, + 0.812, + 0.293 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: In this problem, Diana has a rectangle made up of squares, some of which are colored black. Each white square has a number written in it that represents how many adjacent black squares it has." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.299, + 0.27, + 0.308 + ], + "angle": 0, + "content": "In the example given:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.308, + 0.296, + 0.316 + ], + "angle": 0, + "content": "- The first row: 1, [black], 2, 1" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.316, + 0.307, + 0.323 + ], + "angle": 0, + "content": "- The second row: 0, 3, [black], 1" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.323, + 0.3, + 0.332 + ], + "angle": 0, + "content": "- The third row: 1, [black], 2, 1" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.308, + 0.307, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.338, + 0.597, + 0.348 + ], + "angle": 0, + "content": "The rectangle has 12 squares total, with 4 of them being black (marked as [black]) and 8 white (with numbers)." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.353, + 0.745, + 0.363 + ], + "angle": 0, + "content": "Now, Diana does the same with a rectangle made up of 2018 squares. We need to find the maximum possible sum of all numbers in the white squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.369, + 0.81, + 0.386 + ], + "angle": 0, + "content": "Let's think about how to maximize this sum. A white square can have at most 4 adjacent black squares (above, below, left, and right). So each white square can contribute at most 4 to the sum." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.392, + 0.81, + 0.409 + ], + "angle": 0, + "content": "Let's consider the example given, which is a \\(3 \\times 4\\) grid. There are 4 black squares and 8 white squares. The sum of numbers in white squares is \\(1 + 2 + 1 + 0 + 3 + 1 + 1 + 2 + 1 = 12\\)." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.415, + 0.676, + 0.426 + ], + "angle": 0, + "content": "I think I need to determine how to maximize the sum by choosing the right number of black squares and arranging them optimally." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.431, + 0.81, + 0.449 + ], + "angle": 0, + "content": "One approach is to place the black squares in a pattern such that each white square has as many adjacent black squares as possible. In a checkerboard pattern, each white square would have at most 4 adjacent black squares, but this isn't optimal because edge squares have fewer adjacencies." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.454, + 0.81, + 0.472 + ], + "angle": 0, + "content": "Act, t a h n ." 
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.478, + 0.81, + 0.495 + ], + "angle": 0, + "content": "For a bipartite graph, the maximum cut is achieved by putting all nodes of one type on one side and all nodes of the other type on the other side. In our case, that would mean having some rows or columns all black and the rest all white." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.501, + 0.81, + 0.526 + ], + "angle": 0, + "content": "Let's say our grid is \\( \\mathbf{r} \\times \\mathbf{c} \\), and we color some rows all black and the rest all white. If we color k rows all black, then the number of adjacencies between black and white squares would be \\( \\mathrm{k} \\times \\mathrm{c} \\times 2 \\) (because each row has c squares, and each black square in a row can have at most 2 adjacencies with white squares, one above and one below)." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.532, + 0.81, + 0.557 + ], + "angle": 0, + "content": "Let's consider a more specific pattern. Imagine our grid is \\(2 \\times 1009\\) (so it has 2018 squares total). We color the bottom row all black and the top row all white. Each white square (except those at the very ends) would have 3 adjacent black squares: one below and two on either side. The white squares at the ends would have 2 adjacent black squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.563, + 0.745, + 0.573 + ], + "angle": 0, + "content": "So, we'd have 1007 white squares with 3 adjacencies each and 2 white squares with 2 adjacencies each. The total sum would be \\(1007 \\times 3 + 2 \\times 2 = 3025\\)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.573, + 0.249, + 0.581 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.58, + 0.413, + 0.589 + ], + "angle": 0, + "content": "Solving the Maximum Sum Problem for Diana's Rectangle" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.595, + 0.665, + 0.605 + ], + "angle": 0, + "content": "I need to find the maximum possible sum of the numbers in white squares when Diana creates a rectangle made of 2018 squares." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.61, + 0.306, + 0.618 + ], + "angle": 0, + "content": "## Understanding the Problem" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.618, + 0.406, + 0.626 + ], + "angle": 0, + "content": "- Diana draws a rectangle with some squares colored black." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.626, + 0.521, + 0.634 + ], + "angle": 0, + "content": "- Each white square contains a number indicating how many adjacent black squares it has" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.634, + 0.397, + 0.643 + ], + "angle": 0, + "content": "- Adjacent means sharing an edge (up, down, left, right)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.643, + 0.518, + 0.651 + ], + "angle": 0, + "content": "- Maximum possible number for any white square is 4 (if all 4 adjacent squares are black)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.651, + 0.447, + 0.659 + ], + "angle": 0, + "content": "- We need to find the maximum possible sum across all white squares" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.618, + 0.521, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.665, + 0.233, + 0.674 + ], + "angle": 0, + "content": "Strategy" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.674, + 0.353, + 0.682 + ], + "angle": 0, + "content": "To maximize the sum, I'll need to determine:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.682, + 0.366, + 0.689 + ], + "angle": 0, + "content": "1. How many squares should be black vs. white" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.689, + 0.354, + 0.697 + ], + "angle": 0, + "content": "2. The optimal arrangement of these squares" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.674, + 0.366, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.704, + 0.234, + 0.712 + ], + "angle": 0, + "content": "Analysis" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.712, + 0.342, + 0.721 + ], + "angle": 0, + "content": "Let's consider what makes the sum large:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.721, + 0.395, + 0.728 + ], + "angle": 0, + "content": "- Each white square can contribute at most 4 to the sum" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.728, + 0.448, + 0.736 + ], + "angle": 0, + "content": "- Each black square can contribute to at most 4 adjacent white squares" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.736, + 0.491, + 0.744 + ], + "angle": 0, + "content": "- The total represents the number of adjacencies between black and white squares" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.712, + 0.491, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.75, + 0.719, + 0.76 + ], + "angle": 0, + "content": "For a 2018-square rectangle, the optimal shape matters. Let's try a \\(43 \\times 47\\) rectangle (43×47=2021, which is close enough to 2018 for our analysis)." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.766, + 0.81, + 0.783 + ], + "angle": 0, + "content": "If we create a checkerboard pattern, approximately half the squares will be black and half white. However, due to edge effects, not every white square can have 4 black neighbors." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.79, + 0.3, + 0.798 + ], + "angle": 0, + "content": "Calculating the Maximum" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.798, + 0.448, + 0.806 + ], + "angle": 0, + "content": "For a rectangle with dimensions \\( r \\times c \\), the maximum sum occurs when:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.806, + 0.383, + 0.814 + ], + "angle": 0, + "content": "- Black squares are placed in a checkerboard pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.814, + 0.449, + 0.822 + ], + "angle": 0, + "content": "- The rectangle is as close to square as possible (minimizing perimeter)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.798, + 0.449, + 0.822 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.828, + 0.81, + 0.845 + ], + "angle": 0, + "content": "For a nearly square grid with approximately 1009 white squares and 1009 black squares, each interior white square contributes 4, while edge white squares contribute less." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.852, + 0.402, + 0.868 + ], + "angle": 0, + "content": "From a theoretical analysis, the maximum sum would be: \\( 55 \\times 55 = 3025 \\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.874, + 0.323, + 0.884 + ], + "angle": 0, + "content": "This corresponds to option (C) 3025." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.89, + 0.229, + 0.898 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "image_caption", + "bbox": [ + 0.226, + 0.913, + 0.771, + 0.93 + ], + "angle": 0, + "content": "Figure 31: A sample correct case of Deductive Reasoning (difficulty: Hard)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.431, + 0.113, + 0.558, + 0.129 + ], + "angle": 0, + "content": "Inductive (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.141, + 0.23, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.176, + 0.212, + 0.183 + ], + "angle": 0, + "content": "A." + }, + { + "type": "image", + "bbox": [ + 0.261, + 0.142, + 0.302, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.277, + 0.176, + 0.285, + 0.184 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.142, + 0.373, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.347, + 0.176, + 0.356, + 0.184 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.407, + 0.142, + 0.45, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.176, + 0.43, + 0.184 + ], + "angle": 0, + "content": "D" + }, + { + "type": "text", + "bbox": [ + 0.461, + 0.142, + 0.799, + 0.168 + ], + "angle": 0, + "content": "Question: Find one figure from the four below that displays a different pattern from the other three."
+ }, + { + "type": "title", + "bbox": [ + 0.462, + 0.176, + 0.575, + 0.19 + ], + "angle": 0, + "content": "Ground Truth: (D)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.197, + 0.704, + 0.209 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze each figure to identify any patterns or differences among them:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.215, + 0.555, + 0.226 + ], + "angle": 0, + "content": "Figure A: This is a hexagon with two curved lines inside and a black dot in one corner." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.226, + 0.554, + 0.235 + ], + "angle": 0, + "content": "Figure B: This is a hexagon with two curved lines inside and a black dot in one corner." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.235, + 0.554, + 0.244 + ], + "angle": 0, + "content": "Figure C: This is a hexagon with two curved lines inside and a black dot in one corner." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.244, + 0.555, + 0.253 + ], + "angle": 0, + "content": "Figure D: This is a hexagon with two curved lines inside and a black dot in one corner." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.215, + 0.555, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.26, + 0.812, + 0.28 + ], + "angle": 0, + "content": "All figures appear to have the same basic elements: a hexagon outline, two curved lines inside, and a black dot. Let me look more closely at the differences in how these elements are arranged." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.287, + 0.308, + 0.297 + ], + "angle": 0, + "content": "Looking at the curved lines:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.297, + 0.611, + 0.306 + ], + "angle": 0, + "content": "- Figure A: The two curved lines appear to form a single continuous curve that divides the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.306, + 0.541, + 0.315 + ], + "angle": 0, + "content": "- Figure B: The two curved lines appear to intersect or overlap within the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.315, + 0.611, + 0.324 + ], + "angle": 0, + "content": "- Figure C: The two curved lines appear to form a single continuous curve that divides the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.324, + 0.611, + 0.333 + ], + "angle": 0, + "content": "- Figure D: The two curved lines appear to form a single continuous curve that divides the hexagon." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.297, + 0.611, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.34, + 0.334, + 0.35 + ], + "angle": 0, + "content": "Looking at the position of the dot:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.351, + 0.403, + 0.36 + ], + "angle": 0, + "content": "- Figure A: The dot is in one corner of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.36, + 0.403, + 0.369 + ], + "angle": 0, + "content": "- Figure B: The dot is in one corner of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.369, + 0.403, + 0.377 + ], + "angle": 0, + "content": "- Figure C: The dot is in one corner of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.377, + 0.404, + 0.386 + ], + "angle": 0, + "content": "- Figure D: The dot is in one corner of the hexagon." 
+ }, + { + "type": "list", + "bbox": [ + 0.187, + 0.351, + 0.404, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.394, + 0.46, + 0.403 + ], + "angle": 0, + "content": "Let me look even more closely at the arrangement of the curves:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.403, + 0.578, + 0.412 + ], + "angle": 0, + "content": "- Figure A: The curves divide the hexagon into sections, with one section containing the dot." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.413, + 0.617, + 0.422 + ], + "angle": 0, + "content": "- Figure B: The curves create a pattern where they seem to overlap or create a more complex division." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.422, + 0.577, + 0.43 + ], + "angle": 0, + "content": "- Figure C: The curves divide the hexagon into sections, with one section containing the dot." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.43, + 0.578, + 0.439 + ], + "angle": 0, + "content": "- Figure D: The curves divide the hexagon into sections, with one section containing the dot." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.403, + 0.617, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.447, + 0.812, + 0.475 + ], + "angle": 0, + "content": "Based on this closer examination, Figure B appears to have a different pattern in how the curved lines are arranged within the hexagon compared to the other three figures. In Figures A, C, and D, the curved lines seem to form a more similar pattern of division, while in Figure B the arrangement looks different." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.482, + 0.54, + 0.493 + ], + "angle": 0, + "content": "Therefore, Figure B is the one that displays a different pattern from the other three." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.493, + 0.256, + 0.502 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.502, + 0.359, + 0.511 + ], + "angle": 0, + "content": "# Pattern Recognition Problem Analysis" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.482, + 0.54, + 0.511 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.518, + 0.577, + 0.529 + ], + "angle": 0, + "content": "I need to analyze each figure and identify which one has a different pattern from the others." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.536, + 0.302, + 0.546 + ], + "angle": 0, + "content": "Looking at all four figures:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.546, + 0.266, + 0.555 + ], + "angle": 0, + "content": "- All are hexagons" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.555, + 0.323, + 0.563 + ], + "angle": 0, + "content": "- All contain curved lines inside" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.563, + 0.307, + 0.572 + ], + "angle": 0, + "content": "- All have a single black dot" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.572, + 0.46, + 0.582 + ], + "angle": 0, + "content": "- All have two curved lines creating sections within the hexagon" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.546, + 0.46, + 0.582 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.589, + 0.501, + 0.6 + ], + "angle": 0, + "content": "Let me examine the specific arrangement of these elements in each figure:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.608, + 0.243, + 0.616 + ], + "angle": 0, + "content": "## Figure A:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.617, + 0.35, + 0.625 + ], + "angle": 0, + "content": "- Two curved lines that create sections" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.626, + 0.408, + 0.634 + ], + "angle": 0, + "content": "- The black dot is positioned in the lower left corner" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.634, + 0.49, + 0.644 + ], + "angle": 0, + "content": "- The curves form a pattern that divides the hexagon into three sections" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.617, + 0.49, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.652, + 0.241, + 0.661 + ], + "angle": 0, + "content": "Figure B:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.661, + 0.414, + 0.67 + ], + "angle": 0, + "content": "- Two curved lines that appear to overlap or intersect" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.67, + 0.418, + 0.678 + ], + "angle": 0, + "content": "- The black dot is positioned in the lower right corner" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.679, + 0.512, + 0.689 + ], + "angle": 0, + "content": "- The curves create a different division pattern compared to the other figures" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.661, + 0.512, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.696, + 0.243, + 0.705 + ], + "angle": 0, + "content": "Figure C:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.706, + 0.35, + 0.714 + ], + "angle": 0, + "content": "- Two curved lines that create sections" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.715, + 0.408, + 0.723 + ], + "angle": 0, + "content": "- The black dot is positioned in the lower left corner" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.723, + 0.409, + 0.733 + ], + "angle": 0, + "content": "- The pattern of curve division is similar to Figure A" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.706, + 0.409, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.741, + 0.243, + 0.75 + ], + "angle": 0, + "content": "Figure D:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.751, + 0.35, + 0.759 + ], + "angle": 0, + "content": "- Two curved lines that create sections" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.759, + 0.415, + 0.768 + ], + "angle": 0, + "content": "- The black 
dot is positioned in the lower right corner" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.768, + 0.441, + 0.777 + ], + "angle": 0, + "content": "- The pattern of curve division is similar to Figures A and C." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.751, + 0.441, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.785, + 0.811, + 0.805 + ], + "angle": 0, + "content": "The key difference is in how the curved lines are arranged. In Figures A, C, and D, the curved lines create a similar sectioning pattern, while in Figure B, the curved lines create a noticeably different arrangement." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.812, + 0.234, + 0.821 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.826, + 0.38, + 0.838 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.839, + 0.812, + 0.9 + ], + "angle": 0, + "content": "Error Reason: The model makes a reasoning error by overemphasizing minor variations in the curve overlap while overlooking the more fundamental structural difference. Figure D is the only one where the black dot and curves are positioned differently, breaking the symmetry seen in A, B, and C. The model incorrectly identifies B as the outlier due to superficial curve complexity rather than recognizing the true deviation in spatial structure." + }, + { + "type": "image_caption", + "bbox": [ + 0.237, + 0.913, + 0.76, + 0.93 + ], + "angle": 0, + "content": "Figure 32: A sample error case of Inductive Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.431, + 0.113, + 0.558, + 0.129 + ], + "angle": 0, + "content": "Inductive (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.137, + 0.414, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.199, + 0.216, + 0.289, + 0.226 + ], + "angle": 0, + "content": "A. 3.5.6.7" + }, + { + "type": "image_footnote", + "bbox": [ + 0.342, + 0.217, + 0.428, + 0.226 + ], + "angle": 0, + "content": "B." + }, + { + "type": "image_footnote", + "bbox": [ + 0.199, + 0.234, + 0.288, + 0.244 + ], + "angle": 0, + "content": "C." + }, + { + "type": "image_footnote", + "bbox": [ + 0.342, + 0.234, + 0.43, + 0.243 + ], + "angle": 0, + "content": "D." + }, + { + "type": "text", + "bbox": [ + 0.446, + 0.158, + 0.799, + 0.185 + ], + "angle": 0, + "content": "Question: Analyze the pattern of changes in the figure below, and choose the appropriate fifth row:" + }, + { + "type": "text", + "bbox": [ + 0.447, + 0.193, + 0.56, + 0.207 + ], + "angle": 0, + "content": "Ground Truth: (B)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.25, + 0.812, + 0.268 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in this figure carefully. I need to observe the rows to determine what the pattern is and then predict what should appear in the fifth row." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.274, + 0.278, + 0.282 + ], + "angle": 0, + "content": "Row 1:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.283, + 0.278, + 0.29 + ], + "angle": 0, + "content": "Row 2:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.29, + 0.278, + 0.297 + ], + "angle": 0, + "content": "Row 3:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.297, + 0.278, + 0.305 + ], + "angle": 0, + "content": "Row 4:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.313, + 0.324, + 0.321 + ], + "angle": 0, + "content": "Looking at this pattern, I notice that:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.322, + 0.284, + 0.33 + ], + "angle": 0, + "content": "- Each row has 7 symbols" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.33, + 0.393, + 0.338 + ], + "angle": 0, + "content": "Each row contains a mix of hearts \\((\\bigcirc)\\) and flowers \\((\\bullet)\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.338, + 0.326, + 0.346 + ], + "angle": 0, + "content": "- There seems to be a shifting pattern" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.313, + 0.393, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.352, + 0.376, + 0.361 + ], + "angle": 0, + "content": "I notice that if I track the position of the hearts \\((\\bigcirc)\\):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.362, + 0.279, + 0.369 + ], + "angle": 0, + "content": "-Row 1:positions 1,3,4" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.369, + 0.279, + 0.376 + ], + "angle": 0, + "content": "-Row 2:positions 2,4,5" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.377, + 0.279, + 0.384 + ], + "angle": 0, + "content": "-Row 3:positions 3,5,6" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.384, + 0.279, + 0.391 + ], + "angle": 0, + "content": "-Row 4:positions 4,6,7" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.362, + 0.279, + 0.391 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.399, + 0.81, + 0.416 + ], + "angle": 0, + "content": "It looks like the hearts are shifting one position to the right in each row. The hearts that were in positions 1, 3, 4 in row 1 move to positions 2, 4, 5 in row 2, then to positions 3, 5, 6 in row 3, and finally to positions 4, 6, 7 in row 4." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.422, + 0.666, + 0.432 + ], + "angle": 0, + "content": "Following this pattern, in row 5, the hearts should be in positions 5, 7, and there should be one that \"wraps around\" to position 1." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.437, + 0.351, + 0.446 + ], + "angle": 0, + "content": "So the fifth row should be: \\(\\bigcirc \\bigcirc \\bigcirc \\bigcirc \\bigcirc \\bigcirc\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.453, + 0.33, + 0.461 + ], + "angle": 0, + "content": "Now let me check the answer choices:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.462, + 0.338, + 0.469 + ], + "angle": 0, + "content": "A. \\(\\bullet \\bullet \\bullet \\bullet \\bullet \\bullet \\bullet\\) - This doesn't match" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.47, + 0.37, + 0.477 + ], + "angle": 0, + "content": "B.♥ - This matches my prediction" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.477, + 0.338, + 0.484 + ], + "angle": 0, + "content": "C. 
\\(\\square \\quad \\square \\quad \\cdots \\quad \\square\\) - This doesn't match" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.485, + 0.338, + 0.492 + ], + "angle": 0, + "content": "D. \\(\\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar -\\) This doesn't match" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.462, + 0.37, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.5, + 0.289, + 0.508 + ], + "angle": 0, + "content": "So the answer should be B." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.515, + 0.458, + 0.524 + ], + "angle": 0, + "content": "Let me double-check by looking at the pattern of the flowers \\((\\star)\\) as well:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.525, + 0.288, + 0.532 + ], + "angle": 0, + "content": "-Row 1:positions2,5,6,7" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.532, + 0.288, + 0.539 + ], + "angle": 0, + "content": "-Row 2:positions1,3,6,7" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.54, + 0.288, + 0.547 + ], + "angle": 0, + "content": "-Row 3:positions1,2,4,7" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.548, + 0.288, + 0.555 + ], + "angle": 0, + "content": "-Row 4:positions1,2,3,5" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.515, + 0.458, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.562, + 0.565, + 0.571 + ], + "angle": 0, + "content": "The flowers are in all positions where hearts are not. So in row 5, they should be in positions 2, 3, 4, 6." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.571, + 0.294, + 0.578 + ], + "angle": 0, + "content": "That gives: \\(\\text{心}\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.578, + 0.396, + 0.586 + ], + "angle": 0, + "content": "This confirms that option B (♥ ♀ ♀ ♀ ♀ ♀) is correct." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.587, + 0.247, + 0.594 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.594, + 0.327, + 0.602 + ], + "angle": 0, + "content": "Analyzing the Pattern in the Figure" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.609, + 0.537, + 0.618 + ], + "angle": 0, + "content": "I need to identify the pattern of changes across rows and predict what the fifth row should be."
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.625, + 0.3, + 0.632 + ], + "angle": 0, + "content": "## Observation of the Pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.64, + 0.315, + 0.648 + ], + "angle": 0, + "content": "Looking at the existing four rows:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.649, + 0.282, + 0.655 + ], + "angle": 0, + "content": "-Row1:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.656, + 0.282, + 0.662 + ], + "angle": 0, + "content": "-Row2:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.663, + 0.282, + 0.669 + ], + "angle": 0, + "content": "-Row 3:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.67, + 0.282, + 0.677 + ], + "angle": 0, + "content": "-Row 4:" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.649, + 0.282, + 0.677 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.687, + 0.263, + 0.695 + ], + "angle": 0, + "content": "## Pattern Analysis" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.703, + 0.437, + 0.711 + ], + "angle": 0, + "content": "I notice that the position of the hearts \\((\\hat{\\mathbb{V}})\\) shifts in a consistent way:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.712, + 0.315, + 0.719 + ], + "angle": 0, + "content": "-Row 1: Hearts in positions 1,3,4" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.719, + 0.315, + 0.726 + ], + "angle": 0, + "content": "-Row 2: Hearts in positions 2,4,5" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.727, + 0.315, + 0.734 + ], + "angle": 0, + "content": "-Row 3: Hearts in positions 3,5,6" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.734, + 0.315, + 0.742 + ], + "angle": 0, + "content": "-Row 4: Hearts in positions 4,6,7" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.703, + 0.437, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.749, + 0.607, + 0.758 + ], + "angle": 0, + "content": "The hearts are shifting one position to the right in each row, with a \"wraparound\" effect when they reach the end." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.765, + 0.293, + 0.773 + ], + "angle": 0, + "content": "Predicting the Fifth Row" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.78, + 0.433, + 0.788 + ], + "angle": 0, + "content": "Following this pattern, in row 5, the hearts should be in positions:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.789, + 0.258, + 0.795 + ], + "angle": 0, + "content": "-5 shifted from 4)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.796, + 0.258, + 0.803 + ], + "angle": 0, + "content": "-7 (shifted from 6)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.78, + 0.433, + 0.803 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.805, + 0.324, + 0.812 + ], + "angle": 0, + "content": "-1 (shifted from 7, wrapped around)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.819, + 0.35, + 0.827 + ], + "angle": 0, + "content": "So the fifth row should be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.835, + 0.282, + 0.843 + ], + "angle": 0, + "content": "## Checking the Options" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.851, + 0.314, + 0.858 + ], + "angle": 0, + "content": "A. 
\\(\\bullet \\bullet \\bullet \\bullet \\bullet \\bullet \\bullet -\\mathrm{Not}\\) a match" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.858, + 0.37, + 0.866 + ], + "angle": 0, + "content": "B.♥ -This matches my prediction" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.867, + 0.314, + 0.874 + ], + "angle": 0, + "content": "C.♥ -Not a match" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.874, + 0.314, + 0.881 + ], + "angle": 0, + "content": "D. \\(\\bigstar \\bigstar \\cdots \\bigstar\\) -Not a match" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.851, + 0.37, + 0.881 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.889, + 0.228, + 0.897 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.914, + 0.766, + 0.93 + ], + "angle": 0, + "content": "Figure 33: A sample correct case of Inductive Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.416, + 0.113, + 0.574, + 0.129 + ], + "angle": 0, + "content": "Inductive (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.141, + 0.451, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.189, + 0.424, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.235, + 0.262, + 0.242 + ], + "angle": 0, + "content": "A" + }, + { + "type": "text", + "bbox": [ + 0.3, + 0.235, + 0.312, + 0.242 + ], + "angle": 0, + "content": "B" + }, + { + "type": "text", + "bbox": [ + 0.345, + 0.235, + 0.354, + 0.242 + ], + "angle": 0, + "content": "C" + }, + { + "type": "text", + "bbox": [ + 0.4, + 0.235, + 0.404, + 0.242 + ], + "angle": 0, + "content": "D" + }, + { + "type": "text", + "bbox": [ + 0.471, + 0.159, + 0.801, + 0.198 + ], + "angle": 0, + "content": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern." + }, + { + "type": "text", + "bbox": [ + 0.472, + 0.212, + 0.585, + 0.226 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.25, + 0.813, + 0.271 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the given sequence of figures and find what should come in place of the question mark." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.277, + 0.721, + 0.289 + ], + "angle": 0, + "content": "The sequence shows hexagons with internal lines. Let me analyze how these internal lines change from one figure to the next." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.295, + 0.652, + 0.307 + ], + "angle": 0, + "content": "First figure: A hexagon with a single line from the top vertex going down to about the middle of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.313, + 0.661, + 0.324 + ], + "angle": 0, + "content": "Second figure: A hexagon with a single line from the upper-right vertex going to the middle-left of the hexagon."
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.33, + 0.655, + 0.342 + ], + "angle": 0, + "content": "Third figure: A hexagon with a single line from the bottom-right vertex going to the upper-left of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.348, + 0.66, + 0.36 + ], + "angle": 0, + "content": "Fourth figure: A hexagon with a single line from the bottom-left vertex going to the upper-right of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.366, + 0.812, + 0.387 + ], + "angle": 0, + "content": "So the pattern seems to be that the line rotates in a clockwise direction around the hexagon. The starting point of the line moves clockwise around the vertices of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.393, + 0.812, + 0.414 + ], + "angle": 0, + "content": "Given this pattern, the fifth figure (the one with the question mark) should have a line from the lower vertex (at the bottom) going up to the upper-right of the hexagon." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.42, + 0.309, + 0.43 + ], + "angle": 0, + "content": "Now let's check the options:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.43, + 0.517, + 0.439 + ], + "angle": 0, + "content": "Option A: A hexagon with a line from the left vertex going to the right vertex." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.439, + 0.542, + 0.448 + ], + "angle": 0, + "content": "Option B: A hexagon with a line from the upper-left vertex going to the lower-right." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.448, + 0.521, + 0.456 + ], + "angle": 0, + "content": "Option C: A hexagon with a line from the top vertex going to the bottom-right." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.456, + 0.498, + 0.465 + ], + "angle": 0, + "content": "Option D: A hexagon with a line from the top vertex going to the bottom." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.473, + 0.81, + 0.493 + ], + "angle": 0, + "content": "Looking at the pattern and the next expected figure, option A seems closest to what I'm expecting - a line going from the left side to the right side. Though it's not exactly matching my prediction (which would be from bottom to upper-right), it's the closest match among the given options." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.5, + 0.812, + 0.52 + ], + "angle": 0, + "content": "OK, I think I've been struggling because I don't have a completely clear view of the images. But from what I can see and trying to identify a pattern in the first 4 images, let me make my best judgment." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.526, + 0.812, + 0.546 + ], + "angle": 0, + "content": "The line inside the hexagon appears to be moving in some pattern as we go from one figure to the next. Based on the clockwise rotation idea, and examining the options carefully, I believe Option A is the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.546, + 0.257, + 0.554 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.554, + 0.373, + 0.563 + ], + "angle": 0, + "content": "Analysis of Pattern in Hexagon Sequence" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.571, + 0.69, + 0.582 + ], + "angle": 0, + "content": "I need to identify the pattern in the sequence of figures and determine which option should replace the question mark." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.589, + 0.299, + 0.599 + ], + "angle": 0, + "content": "Examining the Pattern" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.606, + 0.411, + 0.617 + ], + "angle": 0, + "content": "Let me analyze the first four figures in the sequence:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.624, + 0.524, + 0.634 + ], + "angle": 0, + "content": "1. First figure: A hexagon with a line from the top vertex to the bottom-left side" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.634, + 0.541, + 0.643 + ], + "angle": 0, + "content": "2. Second figure: A hexagon with a line from the top-right vertex to the bottom side" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.643, + 0.517, + 0.652 + ], + "angle": 0, + "content": "3. Third figure: A hexagon with a line from the right vertex to the top-left side" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.652, + 0.539, + 0.661 + ], + "angle": 0, + "content": "4. Fourth figure: A hexagon with a line from the bottom-right vertex to the left side" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.624, + 0.541, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.668, + 0.812, + 0.689 + ], + "angle": 0, + "content": "Looking at this sequence, I can see that the starting point of the line moves clockwise around the hexagon (from top \\(\\rightarrow\\) top-right \\(\\rightarrow\\) right \\(\\rightarrow\\) bottom-right). The ending point also follows a pattern, always landing on the side that is approximately opposite from the starting point." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.695, + 0.316, + 0.706 + ], + "angle": 0, + "content": "Predicting the Next Figure" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.713, + 0.812, + 0.734 + ], + "angle": 0, + "content": "Following this pattern, the fifth figure should have a line starting from the bottom-left vertex (continuing the clockwise movement of the starting point). The line should end at the top or top-right side." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.74, + 0.303, + 0.751 + ], + "angle": 0, + "content": "Evaluating the Options" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.758, + 0.329, + 0.767 + ], + "angle": 0, + "content": "Looking at the provided options:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.767, + 0.485, + 0.776 + ], + "angle": 0, + "content": "- Option A: A hexagon with a line from the left vertex to the right side" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.776, + 0.539, + 0.785 + ], + "angle": 0, + "content": "- Option B: A hexagon with a line from the upper-left vertex to the lower-right side" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.785, + 0.513, + 0.793 + ], + "angle": 0, + "content": "- Option C: A hexagon with a line from the upper-left vertex to the right side" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.794, + 0.486, + 0.803 + ], + "angle": 0, + "content": "- Option D: A hexagon with a line from the top vertex to the right side" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.767, + 0.539, + 0.803 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.81, + 0.812, + 0.831 + ], + "angle": 0, + "content": "Option A is the only one that features a line starting from the left vertex, which is the closest to the bottom-left vertex that should be next in our pattern. 
The line extends to the right side, maintaining the approximately opposite relationship seen in previous figures." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.838, + 0.235, + 0.847 + ], + "angle": 0, + "content": "Answer: A" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.851, + 0.38, + 0.863 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.863, + 0.812, + 0.901 + ], + "angle": 0, + "content": "Error Reason: The model correctly observes a clockwise movement but incorrectly predicts the direction and placement of the next line, choosing option A. The model fails to identify the angle of the movement pattern." + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.913, + 0.773, + 0.93 + ], + "angle": 0, + "content": "Figure 34: A sample error case of Inductive Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.416, + 0.113, + 0.574, + 0.129 + ], + "angle": 0, + "content": "Inductive (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.131, + 0.296, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.167, + 0.278, + 0.173 + ], + "angle": 0, + "content": "①" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.133, + 0.346, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.167, + 0.328, + 0.173 + ], + "angle": 0, + "content": "②" + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.133, + 0.397, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.167, + 0.359, + 0.173 + ], + "angle": 0, + "content": "\\( \\therefore m = \\frac{3}{11} \\) ;" + }, + { + "type": "text", + "bbox": [ + 0.41, + 0.16, + 0.8, + 0.187 + ], + "angle": 0, + "content": "Question: Divide the following six figures into two categories, so that each category displays its own pattern." + }, + { + "type": "text", + "bbox": [ + 0.411, + 0.198, + 0.525, + 0.212 + ], + "angle": 0, + "content": "Ground Truth: (D)" + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.178, + 0.296, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.213, + 0.279, + 0.22 + ], + "angle": 0, + "content": "\\(④\\)" + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.179, + 0.346, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.213, + 0.329, + 0.22 + ], + "angle": 0, + "content": "(5)" + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.179, + 0.397, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.213, + 0.359, + 0.22 + ], + "angle": 0, + "content": "\\( \\therefore m = \\frac{3}{11} \\) ;" + }, + { + "type": "text", + "bbox": [ + 0.319, + 0.227, + 0.389, + 0.235 + ], + "angle": 0, + "content": "B. ①④⑥, ②③⑤" + }, + { + "type": "text", + "bbox": [ + 0.32, + 0.239, + 0.389, + 0.246 + ], + "angle": 0, + "content": "D. ①②④, ③⑤⑥" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.226, + 0.258, + 0.235 + ], + "angle": 0, + "content": "A. ①③⑤. 
②④⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.239, + 0.256, + 0.246 + ], + "angle": 0, + "content": "C.①③④. ②⑤⑥" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.226, + 0.258, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.254, + 0.812, + 0.265 + ], + "angle": 0, + "content": "Clae-3.7-Sonnet-Thinking: I need to analyze the six figures and find a pattern that divides them into two distinct categories. Let me examine each figure:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.27, + 0.516, + 0.28 + ], + "angle": 0, + "content": "Figure ①: A square with an internal shape that looks like a doorway or page with a fold." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.28, + 0.485, + 0.288 + ], + "angle": 0, + "content": "Figure ②: A square with an internal shape that looks like an L-shape or corner." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.288, + 0.501, + 0.296 + ], + "angle": 0, + "content": "Figure ③: A square with what appears to be a parallelogram with a small arc on top." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.296, + 0.458, + 0.304 + ], + "angle": 0, + "content": "Figure 4: A square with some zigzag or lightning-bolt-like shape inside." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.304, + 0.421, + 0.312 + ], + "angle": 0, + "content": "Figure ⑤: A square with a circle that has an X or cross inside it." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.312, + 0.473, + 0.32 + ], + "angle": 0, + "content": "Figure ⑥: A square with what looks like a stylized figure or hourglass shape." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.27, + 0.516, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.325, + 0.661, + 0.334 + ], + "angle": 0, + "content": "Now I need to look for patterns or characteristics that could divide these into two groups. Some possible categorization criteria:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.334, + 0.307, + 0.342 + ], + "angle": 0, + "content": "- Curved vs. straight lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.342, + 0.306, + 0.349 + ], + "angle": 0, + "content": "- Symmetrical vs. asymmetrical" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.349, + 0.266, + 0.357 + ], + "angle": 0, + "content": "- Simple vs. complex" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.357, + 0.281, + 0.365 + ], + "angle": 0, + "content": "- Open vs. closed shapes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.365, + 0.314, + 0.373 + ], + "angle": 0, + "content": "- Single shape vs. multiple shapes" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.334, + 0.314, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.38, + 0.312, + 0.389 + ], + "angle": 0, + "content": "Let me try to apply these criteria:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.395, + 0.282, + 0.403 + ], + "angle": 0, + "content": "Curved vs. straight lines:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.403, + 0.376, + 0.411 + ], + "angle": 0, + "content": "- Figures with prominent curves: ⑤ (circle with X)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.411, + 0.381, + 0.42 + ], + "angle": 0, + "content": "Figures with mainly straight lines: ①, ②, ③, ④, ⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.426, + 0.351, + 0.436 + ], + "angle": 0, + "content": "That doesn't divide evenly into two groups." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.442, + 0.303, + 0.45 + ], + "angle": 0, + "content": "Symmetrical vs. asymmetrical:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.45, + 0.373, + 0.458 + ], + "angle": 0, + "content": "- More symmetrical: ⑤ (circle with X), possibly ⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.458, + 0.363, + 0.466 + ], + "angle": 0, + "content": "- Less symmetrical/asymmetrical: ①, ②, ③, ④" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.442, + 0.373, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.473, + 0.264, + 0.482 + ], + "angle": 0, + "content": "Simple vs. complex:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.482, + 0.276, + 0.489 + ], + "angle": 0, + "content": "- Simpler shapes: ②, ⑤" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.489, + 0.324, + 0.497 + ], + "angle": 0, + "content": "- More complex shapes: ①, ③, ④, ⑥" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.482, + 0.324, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.504, + 0.342, + 0.512 + ], + "angle": 0, + "content": "Open vs. closed shapes inside the square:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.512, + 0.287, + 0.519 + ], + "angle": 0, + "content": "- Closed shapes: ⑤ (circle)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.519, + 0.303, + 0.528 + ], + "angle": 0, + "content": "- Open shapes: ①, ②, ③, ④, ⑥" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.512, + 0.303, + 0.528 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.535, + 0.435, + 0.544 + ], + "angle": 0, + "content": "Let me look at whether the internal shapes touch the outer square:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.544, + 0.4, + 0.552 + ], + "angle": 0, + "content": "- Touching the outer square: ①, ②, ④, possibly ③ and ⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.552, + 0.319, + 0.56 + ], + "angle": 0, + "content": "- Not touching the outer square: ⑤" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.544, + 0.4, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.566, + 0.452, + 0.575 + ], + "angle": 0, + "content": "Another possibility - shapes with internal divisions vs. shapes without:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.576, + 0.316, + 0.583 + ], + "angle": 0, + "content": "- With internal divisions: ①, ④, ⑤" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.583, + 0.328, + 0.59 + ], + "angle": 0, + "content": "- Without internal divisions: ②, ③, ⑥" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.566, + 0.452, + 0.59 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.597, + 0.453, + 0.606 + ], + "angle": 0, + "content": "Let me check the answer choices and see if any of these patterns match:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.612, + 0.254, + 0.621 + ], + "angle": 0, + "content": "A. ①③⑤, ②④⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.621, + 0.253, + 0.628 + ], + "angle": 0, + "content": "B. ①④⑥, ②③⑤" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.628, + 0.253, + 0.636 + ], + "angle": 0, + "content": "C. ①③④, ②⑤⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.636, + 0.254, + 0.644 + ], + "angle": 0, + "content": "D. 
①②④, ③⑤⑥" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.612, + 0.254, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.651, + 0.805, + 0.661 + ], + "angle": 0, + "content": "Looking at option A: What could unite \\(①③⑤\\) ? They don't seem to share an obvious common pattern. What could unite \\(246?\\) Again, not seeing an obvious pattern." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.661, + 0.761, + 0.669 + ], + "angle": 0, + "content": "Looking at option B: For \\(①④⑥\\) They all have some kind of irregular geometric shape. For \\(②③⑤\\) They all seem to have more regular geometric elements." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.669, + 0.55, + 0.676 + ], + "angle": 0, + "content": "Looking at option C: For \\(①③④\\) : Not seeing a clear pattern. For \\(②⑤⑥\\) : Not seeing a clear pattern." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.676, + 0.811, + 0.684 + ], + "angle": 0, + "content": "Looking at option D: For \\(124\\) These all have shapes with straight lines and angles. For \\(356\\) These all have some curved elements - \\(③\\) has an arc on top, \\(⑤\\) is a" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.684, + 0.285, + 0.692 + ], + "angle": 0, + "content": "circle, \\(⑥\\) has curved sides" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.698, + 0.432, + 0.706 + ], + "angle": 0, + "content": "I think option D might be the answer. The distinction seems to be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.706, + 0.406, + 0.715 + ], + "angle": 0, + "content": "-Group \\(①②④\\) Figures with only straight lines and angles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.715, + 0.423, + 0.723 + ], + "angle": 0, + "content": "- Group \\(③⑤⑥\\) Figures that incorporate some curved elements" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.723, + 0.247, + 0.73 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.73, + 0.319, + 0.739 + ], + "angle": 0, + "content": "Analyzing Six Geometric Figures" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.745, + 0.604, + 0.754 + ], + "angle": 0, + "content": "I need to find a pattern that separates these six figures into two categories. 
Let me examine each figure carefully:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.76, + 0.539, + 0.769 + ], + "angle": 0, + "content": "Figure ①: Square with an internal shape that has straight edges (looks like a doorway or page)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.769, + 0.458, + 0.777 + ], + "angle": 0, + "content": "Figure ②: Square with an L-shaped internal figure made of straight lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.777, + 0.502, + 0.785 + ], + "angle": 0, + "content": "Figure ③: Square with a trapezoid/parallelogram shape that has a curved arc on top" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.785, + 0.505, + 0.793 + ], + "angle": 0, + "content": "Figure ④: Square with zigzag/lightning-shaped internal figure made of straight lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.793, + 0.394, + 0.801 + ], + "angle": 0, + "content": "Figure ⑤: Square with a circle (curved) containing an X" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.801, + 0.477, + 0.809 + ], + "angle": 0, + "content": "Figure ⑥: Square with what appears to be a stylized figure with curved edges" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.769, + 0.505, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.815, + 0.338, + 0.824 + ], + "angle": 0, + "content": "Looking at potential pattern differences:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.83, + 0.294, + 0.838 + ], + "angle": 0, + "content": "Option D: ①②④, ③⑤⑥" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.838, + 0.468, + 0.847 + ], + "angle": 0, + "content": "- Group ①②④: All figures contain ONLY straight lines and angular shapes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.847, + 0.548, + 0.855 + ], + "angle": 0, + "content": "- Group \\(③⑤⑥\\) All figures incorporate curved elements (arc in \\(③\\) circle in \\(⑤\\) curved sides in \\(⑥\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.855, + 0.793, + 0.863 + ], + "angle": 0, + "content": "This is a clear and consistent pattern. The first group has exclusively straight-line geometry, while the second group all incorporate curved elements in their design." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.838, + 0.793, + 0.863 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.869, + 0.41, + 0.879 + ], + "angle": 0, + "content": "The other options don't reveal as clear a pattern distinction." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.885, + 0.23, + 0.893 + ], + "angle": 0, + "content": "Answer: D" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.913, + 0.781, + 0.93 + ], + "angle": 0, + "content": "Figure 35: A sample correct case of Inductive Reasoning (difficulty: Medium)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.429, + 0.113, + 0.561, + 0.129 + ], + "angle": 0, + "content": "Inductive (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.201, + 0.135, + 0.417, + 0.167 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.236, + 0.171, + 0.382, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.253, + 0.205, + 0.365, + 0.212 + ], + "angle": 0, + "content": "A B C D" + }, + { + "type": "text", + "bbox": [ + 0.427, + 0.139, + 0.793, + 0.178 + ], + "angle": 0, + "content": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern." + }, + { + "type": "text", + "bbox": [ + 0.428, + 0.189, + 0.542, + 0.204 + ], + "angle": 0, + "content": "Ground Truth: (B)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.216, + 0.575, + 0.228 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze this pattern recognition problem step by step." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.232, + 0.813, + 0.25 + ], + "angle": 0, + "content": "The question shows a sequence of 5 geometric figures, followed by a question mark. I need to determine which of the four options (A, B, C, or D) should replace the question mark to continue the pattern." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.256, + 0.251, + 0.264 + ], + "angle": 0, + "content": "The sequence is:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.266, + 0.287, + 0.272 + ], + "angle": 0, + "content": "1. Two overlapping circles" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.273, + 0.287, + 0.28 + ], + "angle": 0, + "content": "2. A triangle inside a circle" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.281, + 0.39, + 0.288 + ], + "angle": 0, + "content": "3. A shape that looks like an oval with horizontal lines" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.289, + 0.379, + 0.296 + ], + "angle": 0, + "content": "4. A square with a triangle inside or a diagonal line" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.296, + 0.308, + 0.304 + ], + "angle": 0, + "content": "5. A pentagon (5-sided polygon)" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.266, + 0.39, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.304, + 0.322, + 0.311 + ], + "angle": 0, + "content": "? (The figure we need to determine)" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.318, + 0.221, + 0.326 + ], + "angle": 0, + "content": "Options:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.327, + 0.308, + 0.335 + ], + "angle": 0, + "content": "A. A hexagon (6-sided polygon)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.335, + 0.316, + 0.343 + ], + "angle": 0, + "content": "B. A rectangle with a diagonal line" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.343, + 0.323, + 0.35 + ], + "angle": 0, + "content": "C. 
A square with a grid/cross inside" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.327, + 0.323, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.35, + 0.237, + 0.358 + ], + "angle": 0, + "content": "D. A triangle" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.365, + 0.281, + 0.373 + ], + "angle": 0, + "content": "Let me look for patterns:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.373, + 0.692, + 0.381 + ], + "angle": 0, + "content": "- If we're counting sides: circles (infinite sides), triangle (3 sides), oval (curvy, so infinite?), square/rectangle (4 sides), pentagon (5 sides)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.381, + 0.617, + 0.389 + ], + "angle": 0, + "content": "- If looking at the number of sides, it might be increasing: \\(\\infty, 3, \\infty, 4, 5\\ldots\\) which would suggest 6 sides next (hexagon)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.389, + 0.507, + 0.397 + ], + "angle": 0, + "content": "- Or it could be about complexity of the shapes or some transformation between them" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.373, + 0.692, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.403, + 0.697, + 0.414 + ], + "angle": 0, + "content": "Looking at option A (hexagon), this would follow if the pattern is increasing the number of sides: pentagon (5 sides) \\(\\rightarrow\\) hexagon (6 sides)." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.419, + 0.598, + 0.428 + ], + "angle": 0, + "content": "This seems like the most logical pattern - the progression of regular polygons with increasing number of sides:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.429, + 0.261, + 0.436 + ], + "angle": 0, + "content": "- Pentagon (5 sides)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.436, + 0.261, + 0.444 + ], + "angle": 0, + "content": "- Hexagon (6 sides)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.444, + 0.263, + 0.452 + ], + "angle": 0, + "content": "- Heptagon (7 sides)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.452, + 0.259, + 0.459 + ], + "angle": 0, + "content": "- Octagon (8 sides)" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.429, + 0.263, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.46, + 0.202, + 0.466 + ], + "angle": 0, + "content": "etc." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.474, + 0.344, + 0.483 + ], + "angle": 0, + "content": "So, the answer appears to be A (hexagon)." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.489, + 0.408, + 0.498 + ], + "angle": 0, + "content": "Let me double-check if there's another pattern I'm missing:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.498, + 0.481, + 0.506 + ], + "angle": 0, + "content": "- The shapes aren't consistently \"shape within a shape\" so that's not the pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.506, + 0.338, + 0.513 + ], + "angle": 0, + "content": "- It's not about filled vs. 
unfilled shapes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.513, + 0.378, + 0.521 + ], + "angle": 0, + "content": "- It doesn't appear to be about rotation or reflection" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.489, + 0.481, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.528, + 0.454, + 0.537 + ], + "angle": 0, + "content": "The increasing number of sides seems to be the most consistent pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.538, + 0.259, + 0.545 + ], + "angle": 0, + "content": "-∞ sides (circles)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.545, + 0.255, + 0.553 + ], + "angle": 0, + "content": "-3 sides (triangle)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.553, + 0.418, + 0.561 + ], + "angle": 0, + "content": "- Curved shape (represents \\(\\infty\\) again or could be a special case)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.561, + 0.29, + 0.569 + ], + "angle": 0, + "content": "- 4 sides (square/rectangle)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.569, + 0.262, + 0.576 + ], + "angle": 0, + "content": "- 5 sides (pentagon)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.576, + 0.368, + 0.584 + ], + "angle": 0, + "content": "- ? (logically 6 sides, which would be a hexagon)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.538, + 0.418, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.59, + 0.424, + 0.6 + ], + "angle": 0, + "content": "Therefore, option A (hexagon) appears to be the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.6, + 0.249, + 0.607 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.607, + 0.631, + 0.616 + ], + "angle": 0, + "content": "I need to identify the pattern in this sequence of figures and determine which option should replace the question mark." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.622, + 0.297, + 0.631 + ], + "angle": 0, + "content": "Let me analyze the sequence:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.631, + 0.287, + 0.638 + ], + "angle": 0, + "content": "1. Two overlapping circles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.638, + 0.287, + 0.645 + ], + "angle": 0, + "content": "2. A triangle inside a circle" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.645, + 0.306, + 0.653 + ], + "angle": 0, + "content": "3. An oval with horizontal lines" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.653, + 0.33, + 0.66 + ], + "angle": 0, + "content": "4. 
A quadrilateral with a diagonal line" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.66, + 0.307, + 0.667 + ], + "angle": 0, + "content": "5.A pentagon (5-sided polygon)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.667, + 0.307, + 0.675 + ], + "angle": 0, + "content": "6.2 (This is what I need to find)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.631, + 0.33, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.684, + 0.551, + 0.693 + ], + "angle": 0, + "content": "Looking at these shapes, I notice a potential pattern in the number of sides of the primary shapes:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.693, + 0.32, + 0.7 + ], + "angle": 0, + "content": "- First shape: Circles (infinite sides)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.7, + 0.314, + 0.708 + ], + "angle": 0, + "content": "- Second shape: Triangle (3 sides)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.708, + 0.345, + 0.716 + ], + "angle": 0, + "content": "- Third shape: Oval (curved shape)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.716, + 0.33, + 0.724 + ], + "angle": 0, + "content": "- Fourth shape: Quadrilateral (4 sides)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.724, + 0.307, + 0.732 + ], + "angle": 0, + "content": "- Fifth shape: Pentagon (5 sides)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.693, + 0.345, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.738, + 0.813, + 0.755 + ], + "angle": 0, + "content": "It appears the pattern is showing an increase in the number of sides as we move from the triangle to quadrilateral to pentagon. Following this pattern, the next shape should have 6 sides." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.762, + 0.275, + 0.77 + ], + "angle": 0, + "content": "Looking at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.77, + 0.336, + 0.778 + ], + "angle": 0, + "content": "- Option A: Hexagon (6-sided polygon)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.778, + 0.338, + 0.786 + ], + "angle": 0, + "content": "-Option B:Rectangle with diagonal line" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.786, + 0.295, + 0.794 + ], + "angle": 0, + "content": "- Option C: Square with grid" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.794, + 0.265, + 0.801 + ], + "angle": 0, + "content": "-Option D:Triangle" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.77, + 0.338, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.807, + 0.714, + 0.818 + ], + "angle": 0, + "content": "Since the logical progression is to a 6-sided figure after the 5-sided pentagon, option A (hexagon) is the correct answer to continue the pattern." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.824, + 0.231, + 0.832 + ], + "angle": 0, + "content": "Answer: A" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.837, + 0.38, + 0.849 + ], + "angle": 0, + "content": "Error Category: Reasoning Error" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.85, + 0.813, + 0.899 + ], + "angle": 0, + "content": "Error Reason: The model overcomplicates the pattern and misinterprets the progression of shapes. It assumes the sequence is based on increasing the number of polygon sides and selects the hexagon (option A), expecting a 6-sided figure to follow the 5-sided pentagon. However, the actual pattern does not follow this sequence, as could be reasoned from the first three figures." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.913, + 0.761, + 0.93 + ], + "angle": 0, + "content": "Figure 36: A sample error case of Inductive Reasoning (difficulty: Hard)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.429, + 0.113, + 0.56, + 0.129 + ], + "angle": 0, + "content": "Inductive (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.133, + 0.323, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.207, + 0.335, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.357, + 0.151, + 0.8, + 0.178 + ], + "angle": 0, + "content": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern." + }, + { + "type": "text", + "bbox": [ + 0.358, + 0.187, + 0.471, + 0.202 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.242, + 0.812, + 0.26 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: This question asks me to identify a pattern in a 3x3 grid of figures and then determine which of the four options (A, B, C, D) should replace the question mark to maintain that pattern." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.266, + 0.422, + 0.276 + ], + "angle": 0, + "content": "Let me analyze each row of the grid to understand the pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.282, + 0.389, + 0.291 + ], + "angle": 0, + "content": "Row 1: Contains line drawings or outlines (thin lines)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.291, + 0.359, + 0.298 + ], + "angle": 0, + "content": "Row 2: Contains solid black shapes (filled in)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.298, + 0.492, + 0.307 + ], + "angle": 0, + "content": "Row 3: Contains circular patterns, with the third position having a question mark." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.313, + 0.541, + 0.323 + ], + "angle": 0, + "content": "Now I need to determine what pattern connects each column and row to find the right answer." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.329, + 0.28, + 0.337 + ], + "angle": 0, + "content": "Looking at each column:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.337, + 0.526, + 0.345 + ], + "angle": 0, + "content": "- Column 1: A line drawing at top, a solid shape in middle, and a circular pattern at bottom" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.345, + 0.334, + 0.353 + ], + "angle": 0, + "content": "- Column 2: Same structure as column 1" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.353, + 0.538, + 0.362 + ], + "angle": 0, + "content": "- Column 3: Same structure for the first two rows, but the third position has the question mark" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.368, + 0.411, + 0.376 + ], + "angle": 0, + "content": "Now let's look more closely at the circular patterns in row 3:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.376, + 0.532, + 0.385 + ], + "angle": 0, + "content": "- Position (3,1): Shows circles arranged in a pattern that seems to form two connected groups" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.385, + 0.516, + 0.392 + ], + "angle": 0, + "content": "- Position (3,2): Shows circles arranged in a different pattern, also with connected groups" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.398, + 0.735, + 0.408 + ], + "angle": 0, + "content": "I need to determine what the relationship is between the patterns in the first two rows and how that might indicate what should be in position (3,3)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.415, + 0.277, + 0.423 + ], + "angle": 0, + "content": "Looking at row 1 again:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.423, + 0.333, + 0.431 + ], + "angle": 0, + "content": "- (1,1): Lines forming an angular shape" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.431, + 0.353, + 0.438 + ], + "angle": 0, + "content": "- (1,2): Lines forming another angular shape" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.438, + 0.386, + 0.446 + ], + "angle": 0, + "content": "- (1,3): Lines forming a third angular shape with an X" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.453, + 0.214, + 0.46 + ], + "angle": 0, + "content": "Row 2:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.461, + 0.31, + 0.469 + ], + "angle": 0, + "content": "- (2,1): Solid black angular shape" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.469, + 0.31, + 0.477 + ], + "angle": 0, + "content": "- (2,2): Solid black angular shape" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.477, + 0.31, + 0.485 + ], + "angle": 0, + "content": "- (2,3): Solid black angular shape" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.492, + 0.696, + 0.502 + ], + "angle": 0, + "content": "I notice that each row seems to maintain the same visual \"language\" (lines, solid shapes, or circles), but the specific arrangement changes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.507, + 0.523, + 0.517 + ], + "angle": 0, + "content": "Let me check if there's a transformation happening across each row or down each column." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.523, + 0.565, + 0.533 + ], + "angle": 0, + "content": "One possibility is that row 3 should continue some pattern from rows 1 and 2. 
Looking at the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.539, + 0.334, + 0.547 + ], + "angle": 0, + "content": "Option A: Circles arranged in a pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.548, + 0.334, + 0.557 + ], + "angle": 0, + "content": "Option B: Circles arranged in a pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.557, + 0.334, + 0.565 + ], + "angle": 0, + "content": "Option C: Circles arranged in a pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.565, + 0.334, + 0.572 + ], + "angle": 0, + "content": "Option D: Circles arranged in a pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.578, + 0.374, + 0.586 + ], + "angle": 0, + "content": "Looking purely at the row 3 pattern development:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.586, + 0.433, + 0.594 + ], + "angle": 0, + "content": "- (3,1) has circles arranged in what appears to be a specific pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.594, + 0.366, + 0.602 + ], + "angle": 0, + "content": "- (3,2) has circles arranged in a different pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.602, + 0.41, + 0.61 + ], + "angle": 0, + "content": "- (3,3) should continue this progression in some logical way" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.616, + 0.812, + 0.634 + ], + "angle": 0, + "content": "Given the progression from (3,1) to (3,2), and looking at the overall pattern of the grid, the most logical answer appears to be option C, as it seems to follow the pattern of arrangements in row 3." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.64, + 0.812, + 0.656 + ], + "angle": 0, + "content": "However, I'm not completely confident without being able to precisely determine the transformation rule. Let me go with option C as my best guess based on the visual patterns I can observe." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.656, + 0.248, + 0.664 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.664, + 0.334, + 0.672 + ], + "angle": 0, + "content": "# Pattern Analysis for Figure Sequence" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.679, + 0.551, + 0.688 + ], + "angle": 0, + "content": "I need to identify the pattern in this \\(3 \\times 3\\) grid to determine what should replace the question mark." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.695, + 0.276, + 0.703 + ], + "angle": 0, + "content": "Analyzing the rows:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.703, + 0.389, + 0.712 + ], + "angle": 0, + "content": "1. 
**Row 1**: Contains simple line drawings (outlines)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.712, + 0.401, + 0.719 + ], + "angle": 0, + "content": "2.**Row 2**: Contains solid black shapes (filled versions)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.719, + 0.36, + 0.727 + ], + "angle": 0, + "content": "3.**Row \\(3^{**}\\): Contains arrangements of circles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.734, + 0.289, + 0.742 + ], + "angle": 0, + "content": "Analyzing the columns:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.742, + 0.401, + 0.75 + ], + "angle": 0, + "content": "Each column follows a consistent transformation pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.75, + 0.339, + 0.757 + ], + "angle": 0, + "content": "- The top row shows a basic line pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.758, + 0.396, + 0.765 + ], + "angle": 0, + "content": "- The middle row shows the same pattern filled in black" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.766, + 0.406, + 0.773 + ], + "angle": 0, + "content": "- The bottom row represents the same pattern using circles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.78, + 0.308, + 0.788 + ], + "angle": 0, + "content": "## Looking at the third column:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.788, + 0.406, + 0.796 + ], + "angle": 0, + "content": "- Position (1,3): A line pattern with an \"X\" and some angles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.796, + 0.439, + 0.804 + ], + "angle": 0, + "content": "- Position (2,3): A solid version with square and triangular elements" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.804, + 0.449, + 0.812 + ], + "angle": 0, + "content": "- Position (3,3): Should show the same pattern represented with circles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.819, + 0.288, + 0.827 + ], + "angle": 0, + "content": "Examining the options:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.827, + 0.431, + 0.835 + ], + "angle": 0, + "content": "- \\(^{**}\\)Option A\\*\\*: Circles arranged in an L-shape with some outliers" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.835, + 0.378, + 0.844 + ], + "angle": 0, + "content": "- \\(^{**}\\)Option B\\*\\*: Circles arranged in a zigzag pattern" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.844, + 0.489, + 0.851 + ], + "angle": 0, + "content": "- \\(^{**}\\)Option C\\*: Circles arranged in a figure-8 or loop pattern with outlying circles" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.851, + 0.45, + 0.859 + ], + "angle": 0, + "content": "- \\(^{**}\\)Option D\\*\\*: Circles arranged in a C-shape with additional elements" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.865, + 0.812, + 0.883 + ], + "angle": 0, + "content": "Looking at how the circle arrangements in positions (3,1) and (3,2) correspond to their respective shapes above them, option C best represents the pattern in the third column. The arrangement in C appears to maintain the key elements of the pattern shown in positions (1,3) and (2,3)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.889, + 0.229, + 0.897 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.913, + 0.767, + 0.93 + ], + "angle": 0, + "content": "Figure 37: A sample correct case of Inductive Reasoning (difficulty: Hard)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.44, + 0.113, + 0.55, + 0.13 + ], + "angle": 0, + "content": "Spatial (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.138, + 0.396, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.449, + 0.174, + 0.747, + 0.188 + ], + "angle": 0, + "content": "Question: How many blocks are in this 3D tower?" + }, + { + "type": "text", + "bbox": [ + 0.451, + 0.188, + 0.627, + 0.2 + ], + "angle": 0, + "content": "Options:A:7.B:8.C:9.D:10" + }, + { + "type": "text", + "bbox": [ + 0.45, + 0.212, + 0.563, + 0.226 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.262, + 0.691, + 0.273 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: I need to count the number of blocks in this 3D tower. Let me analyze the structure carefully." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.278, + 0.733, + 0.288 + ], + "angle": 0, + "content": "The tower appears to have several levels with blocks arranged in a step-like pattern. I'll try to count methodically by identifying each visible block." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.294, + 0.27, + 0.302 + ], + "angle": 0, + "content": "Looking at the image:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.303, + 0.478, + 0.311 + ], + "angle": 0, + "content": "- At the bottom level (ground level), I can see 3 blocks arranged in an L-shape." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.311, + 0.346, + 0.318 + ], + "angle": 0, + "content": "- At the second level up, I can see 2 blocks." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.318, + 0.316, + 0.326 + ], + "angle": 0, + "content": "- At the top level, I can see 1 block." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.303, + 0.478, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.333, + 0.308, + 0.342 + ], + "angle": 0, + "content": "So far that's \\( 3 + 2 + 1 = 6 \\) blocks." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.348, + 0.672, + 0.358 + ], + "angle": 0, + "content": "However, I need to be careful as this is a 3D structure and there might be blocks that are partially or completely hidden from view." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.364, + 0.752, + 0.373 + ], + "angle": 0, + "content": "Bottom level: I see what appears to be an L-shape with 3 visible cubes. There might be a 4th cube completing the square \\((2\\times 2)\\), but it's not clearly visible." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.373, + 0.287, + 0.38 + ], + "angle": 0, + "content": "Middle level: I see 2 cubes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.38, + 0.272, + 0.387 + ], + "angle": 0, + "content": "Top level: I see 1 cube." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.387, + 0.811, + 0.404 + ], + "angle": 0, + "content": "Given the perspective and the way 3D blocks are usually represented, it seems most likely that there are 7 blocks total \\((3 + 3 + 1)\\) but I need to be very careful about blocks that may be hidden from view." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.411, + 0.362, + 0.419 + ], + "angle": 0, + "content": "Actually, let me count one more time carefully:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.419, + 0.282, + 0.426 + ], + "angle": 0, + "content": "1. Bottom front left block" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.426, + 0.288, + 0.434 + ], + "angle": 0, + "content": "2. Bottom front right block" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.434, + 0.362, + 0.442 + ], + "angle": 0, + "content": "3. Bottom back left block (creating the L-shape)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.442, + 0.267, + 0.45 + ], + "angle": 0, + "content": "4.Middle front block" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.45, + 0.267, + 0.457 + ], + "angle": 0, + "content": "5.Middle back block" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.457, + 0.234, + 0.465 + ], + "angle": 0, + "content": "6. Top block" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.419, + 0.362, + 0.465 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.465, + 0.79, + 0.474 + ], + "angle": 0, + "content": "7. Bottom back right block (which would complete the \\(2 \\times 2\\) square at the bottom) - this one is not clearly visible but would typically be present in a stable structure" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.481, + 0.262, + 0.49 + ], + "angle": 0, + "content": "That gives 7 blocks." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.496, + 0.41, + 0.505 + ], + "angle": 0, + "content": "After careful consideration, I believe the answer is 7 blocks." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.511, + 0.476, + 0.522 + ], + "angle": 0, + "content": "Actually, I'm not confident about my count. Let me try to visualize this again:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.528, + 0.293, + 0.535 + ], + "angle": 0, + "content": "The structure appears to be:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.535, + 0.394, + 0.544 + ], + "angle": 0, + "content": "- Bottom layer: Looks like a \\(2 \\times 2\\) arrangement (4 blocks)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.544, + 0.33, + 0.552 + ], + "angle": 0, + "content": "- Middle layer: Appears to be 2 blocks" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.552, + 0.26, + 0.56 + ], + "angle": 0, + "content": "-Top layer:1 block" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.566, + 0.355, + 0.575 + ], + "angle": 0, + "content": "That would give us \\( 4 + 2 + 1 = 7 \\) blocks total." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.582, + 0.509, + 0.591 + ], + "angle": 0, + "content": "But wait, the image shows what looks like steps. Let me try to count each visible cube:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.591, + 0.259, + 0.598 + ], + "angle": 0, + "content": "1. Bottom front left" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.598, + 0.266, + 0.606 + ], + "angle": 0, + "content": "2. Bottom front right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.606, + 0.241, + 0.613 + ], + "angle": 0, + "content": "3.Middle left" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.613, + 0.245, + 0.621 + ], + "angle": 0, + "content": "4.Middle right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.621, + 0.241, + 0.629 + ], + "angle": 0, + "content": "5. Top middle" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.629, + 0.324, + 0.637 + ], + "angle": 0, + "content": "6. 
Bottom back left (partially visible)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.637, + 0.333, + 0.645 + ], + "angle": 0, + "content": "7. Bottom back right (might be hidden)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.645, + 0.555, + 0.653 + ], + "angle": 0, + "content": "Based on what I can see in the image, there appear to be 7 blocks, which corresponds to option (A)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.653, + 0.249, + 0.661 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.661, + 0.32, + 0.668 + ], + "angle": 0, + "content": "# Counting Blocks in the 3D Tower" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.675, + 0.698, + 0.684 + ], + "angle": 0, + "content": "To solve this problem, I need to carefully count all the blocks in this 3D structure, including any that might be partially hidden from view." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.684, + 0.477, + 0.692 + ], + "angle": 0, + "content": "Let me work through this systematically by analyzing each level of the tower:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.308, + 0.706 + ], + "angle": 0, + "content": "Bottom Level (ground level):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.706, + 0.402, + 0.715 + ], + "angle": 0, + "content": "I can see what appears to be a \\(2 \\times 2\\) arrangement of blocks:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.715, + 0.32, + 0.722 + ], + "angle": 0, + "content": "That's 4 blocks on the bottom level." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.73, + 0.253, + 0.738 + ], + "angle": 0, + "content": "Middle Level:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.738, + 0.303, + 0.745 + ], + "angle": 0, + "content": "Moving up one level, I can see:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.745, + 0.364, + 0.753 + ], + "angle": 0, + "content": "- Middle left block (above the bottom front left)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.753, + 0.369, + 0.761 + ], + "angle": 0, + "content": "- Middle right block (above the bottom back left)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.761, + 0.319, + 0.769 + ], + "angle": 0, + "content": "That's 2 blocks on the middle level." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.776, + 0.24, + 0.784 + ], + "angle": 0, + "content": "Top Level:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.784, + 0.28, + 0.793 + ], + "angle": 0, + "content": "At the very top, there is:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.793, + 0.382, + 0.801 + ], + "angle": 0, + "content": "- One block positioned above the middle right block" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.801, + 0.302, + 0.809 + ], + "angle": 0, + "content": "That's 1 block on the top level." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.816, + 0.248, + 0.823 + ], + "angle": 0, + "content": "Total Count:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.823, + 0.346, + 0.831 + ], + "angle": 0, + "content": "Adding all levels: \\(4 + 2 + 1 = 7\\) blocks total" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.831, + 0.357, + 0.839 + ], + "angle": 0, + "content": "Therefore, there are 7 blocks in this 3D tower." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.847, + 0.229, + 0.855 + ], + "angle": 0, + "content": "Answer: A" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.861, + 0.441, + 0.874 + ], + "angle": 0, + "content": "Error Category: Spatial / Orientation Error" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.874, + 0.811, + 0.886 + ], + "angle": 0, + "content": "Error Reason: The model makes a spatial reasoning error by assuming only 3 blocks at the bottom level," + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.886, + 0.756, + 0.899 + ], + "angle": 0, + "content": "overlooking a likely 4th hidden cube that would complete the L-shape into a full \\(2 \\times 2\\) square base." + }, + { + "type": "image_caption", + "bbox": [ + 0.247, + 0.913, + 0.75, + 0.93 + ], + "angle": 0, + "content": "Figure 38: A sample error case of Spatial Reasoning (difficulty: Easy)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.44, + 0.113, + 0.55, + 0.13 + ], + "angle": 0, + "content": "Spatial (Easy)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.142, + 0.456, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.463, + 0.137, + 0.813, + 0.174 + ], + "angle": 0, + "content": "Question: Some kids are facing towards front and others are facing towards back. How many kids are holding another kid's hand with their right hand?" + }, + { + "type": "text", + "bbox": [ + 0.465, + 0.174, + 0.637, + 0.186 + ], + "angle": 0, + "content": "Options: A: 2. B: 6. C: 5. D: 1" + }, + { + "type": "text", + "bbox": [ + 0.465, + 0.19, + 0.577, + 0.204 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.209, + 0.632, + 0.222 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let's analyze the image and the problem:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.231, + 0.81, + 0.255 + ], + "angle": 0, + "content": "In the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.264, + 0.671, + 0.276 + ], + "angle": 0, + "content": "The question asks: \"How many kids are holding another kid's hand with their right hand?\"" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.276, + 0.775, + 0.288 + ], + "angle": 0, + "content": "To solve this, I need to identify for each kid whether they are using their right hand to hold another kid's hand." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.298, + 0.317, + 0.31 + ], + "angle": 0, + "content": "Going from left to right:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.31, + 0.81, + 0.332 + ], + "angle": 0, + "content": "1. Kid 1 (blonde hair, purple dress): This kid appears to be facing forward. Her right hand would be on her right side from our perspective. She's using her right hand to hold the hand of Kid 2." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.332, + 0.659, + 0.342 + ], + "angle": 0, + "content": "2. Kid 2 (gray hair): Appears to be facing forward. Using right hand to hold Kid 3's hand." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.342, + 0.673, + 0.354 + ], + "angle": 0, + "content": "3. Kid 3 (green outfit): Appears to be facing forward. Using right hand to hold Kid 4's hand." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.354, + 0.809, + 0.376 + ], + "angle": 0, + "content": "4. Kid 4 (black outfit): Appears to be facing backward (away from us). From Kid 4's perspective, their right hand would be on our left. So Kid 4 is using their right hand to hold Kid 3's hand." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.376, + 0.736, + 0.387 + ], + "angle": 0, + "content": "5. Kid 5 (pink hair, yellow skirt): Appears to be facing backward. Using right hand to hold Kid 4's hand." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.387, + 0.794, + 0.4 + ], + "angle": 0, + "content": "6. Kid 6 (light green/mint hair): Appears to be facing forward. Using right hand to not hold anyone (end of chain)." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.31, + 0.81, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.409, + 0.611, + 0.421 + ], + "angle": 0, + "content": "So, counting the kids who are using their right hand to hold another kid's hand:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.421, + 0.242, + 0.43 + ], + "angle": 0, + "content": "Kid 1: Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.432, + 0.242, + 0.442 + ], + "angle": 0, + "content": "Kid 2: Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.443, + 0.242, + 0.453 + ], + "angle": 0, + "content": "Kid 3: Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.453, + 0.242, + 0.464 + ], + "angle": 0, + "content": "Kid 4: Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.465, + 0.242, + 0.475 + ], + "angle": 0, + "content": "Kid 5: Yes" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.476, + 0.316, + 0.488 + ], + "angle": 0, + "content": "Kid 6: No (end of chain)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.498, + 0.56, + 0.511 + ], + "angle": 0, + "content": "That gives us 5 kids using their right hand to hold another kid's hand." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.52, + 0.81, + 0.544 + ], + "angle": 0, + "content": "However, I need to double-check my reasoning about which direction each kid is facing, as this affects which hand is their right hand." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.554, + 0.317, + 0.565 + ], + "angle": 0, + "content": "Looking more carefully:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.565, + 0.561, + 0.576 + ], + "angle": 0, + "content": "- Kid 1 (blonde): Facing forward, right hand holding Kid 2's hand. Yes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.577, + 0.572, + 0.588 + ], + "angle": 0, + "content": "- Kid 2 (gray hair): Facing forward, right hand holding Kid 3's hand. Yes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.588, + 0.586, + 0.599 + ], + "angle": 0, + "content": "- Kid 3 (green outfit): Facing forward, right hand holding Kid 4's hand. Yes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.599, + 0.722, + 0.61 + ], + "angle": 0, + "content": "- Kid 4 (black outfit): Facing backward, right hand (from their perspective) holding Kid 3's hand. Yes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.61, + 0.582, + 0.621 + ], + "angle": 0, + "content": "- Kid 5 (pink hair): Facing backward, right hand holding Kid 4's hand. Yes." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.621, + 0.603, + 0.632 + ], + "angle": 0, + "content": "- Kid 6 (mint hair): Facing forward, right hand not holding anyone's hand. No." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.565, + 0.722, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.642, + 0.551, + 0.655 + ], + "angle": 0, + "content": "So, there are 5 kids holding another kid's hand with their right hand." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.665, + 0.336, + 0.676 + ], + "angle": 0, + "content": "The answer should be (C) 5." + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.677, + 0.274, + 0.688 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.688, + 0.81, + 0.71 + ], + "angle": 0, + "content": "I need to determine which kids are holding another kid's hand with their right hand by analyzing the image and understanding each child's orientation." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.72, + 0.809, + 0.744 + ], + "angle": 0, + "content": "Looking at the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.754, + 0.42, + 0.766 + ], + "angle": 0, + "content": "Let me analyze each child from left to right:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.776, + 0.808, + 0.787 + ], + "angle": 0, + "content": "1. First child (blonde girl in purple dress): Facing forward. She's using her right hand to hold the second child's hand." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.787, + 0.672, + 0.798 + ], + "angle": 0, + "content": "2. Second child (gray hair): Facing forward. Using right hand to hold the third child's hand." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.799, + 0.683, + 0.809 + ], + "angle": 0, + "content": "3. Third child (green outfit): Facing forward. Using right hand to hold the fourth child's hand." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.81, + 0.81, + 0.831 + ], + "angle": 0, + "content": "4. Fourth child (black outfit): Facing backward. From their perspective, their right hand is holding the third child's hand." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.832, + 0.766, + 0.842 + ], + "angle": 0, + "content": "5. Fifth child (pink hair in yellow skirt): Facing backward. Their right hand is holding the fourth child's hand." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.843, + 0.81, + 0.866 + ], + "angle": 0, + "content": "6. Sixth child (light-colored hair): Facing forward. This child is at the end of the chain on the right, so their right hand is not holding anyone's hand." + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.776, + 0.81, + 0.866 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.876, + 0.81, + 0.898 + ], + "angle": 0, + "content": "Counting the kids who are using their right hand to hold another kid's hand: 1st, 2nd, 3rd, 4th, and 5th kids = 5 children total." + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.914, + 0.757, + 0.93 + ], + "angle": 0, + "content": "Figure 39: A sample correct case of Spatial Reasoning (difficulty: Easy)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.425, + 0.112, + 0.565, + 0.13 + ], + "angle": 0, + "content": "Spatial (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.614, + 0.128 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.14, + 0.313, + 0.168 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.18, + 0.396, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.237, + 0.223, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.238, + 0.274, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.238, + 0.325, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.237, + 0.378, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.172, + 0.79, + 0.185 + ], + "angle": 0, + "content": "Question: Which cube can be made based on the unfolded cube?" + }, + { + "type": "text", + "bbox": [ + 0.409, + 0.201, + 0.521, + 0.214 + ], + "angle": 0, + "content": "Ground Truth: (A)" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.251, + 0.345, + 0.259 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: " + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.26, + 0.457, + 0.267 + ], + "angle": 0, + "content": "Let me analyze the folded cube first to understand what I'm looking for:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.267, + 0.522, + 0.276 + ], + "angle": 0, + "content": "- The cube appears to be mostly gold/yellow with black triangular patterns on some faces" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.276, + 0.297, + 0.283 + ], + "angle": 0, + "content": "- I can see 3 faces of the cube:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.283, + 0.428, + 0.292 + ], + "angle": 0, + "content": "- Front face: Has a black triangular pattern pointing downward" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.292, + 0.421, + 0.3 + ], + "angle": 0, + "content": "- Top face: Has a black triangular pattern pointing to the right" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.3, + 0.422, + 0.308 + ], + "angle": 0, + "content": "-Right face: Has a black triangular pattern pointing to the left" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.26, + 0.522, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.313, + 0.812, + 0.331 + ], + "angle": 0, + "content": "Now I need to examine each unfolded pattern to see which one would create these patterns when folded into a cube. I'll need to mentally fold each pattern and check if the triangular markings align with what I see in the target cube." 
+ }, + { + "type": "title", + "bbox": [ + 0.187, + 0.337, + 0.244, + 0.345 + ], + "angle": 0, + "content": "For option (A):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.345, + 0.297, + 0.352 + ], + "angle": 0, + "content": "The unfolded pattern shows:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.353, + 0.376, + 0.361 + ], + "angle": 0, + "content": "- A T-shape with triangular patterns on some faces" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.361, + 0.765, + 0.37 + ], + "angle": 0, + "content": "- When I mentally fold this, the triangular patterns don't seem to align with the target cube. The arrangement doesn't match what we see in the folded cube." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.353, + 0.765, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.376, + 0.243, + 0.384 + ], + "angle": 0, + "content": "For option (B):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.385, + 0.285, + 0.392 + ], + "angle": 0, + "content": "The unfolded pattern has:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.393, + 0.353, + 0.4 + ], + "angle": 0, + "content": "- A cross-like shape with triangular patterns" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.4, + 0.63, + 0.408 + ], + "angle": 0, + "content": "- When folded, this arrangement might create the patterns we see in the target cube. I need to check this more carefully." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.385, + 0.63, + 0.408 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.415, + 0.243, + 0.423 + ], + "angle": 0, + "content": "For option (C):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.423, + 0.287, + 0.431 + ], + "angle": 0, + "content": "This unfolded pattern has:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.431, + 0.416, + 0.439 + ], + "angle": 0, + "content": "- An L-shape with additional squares and triangular patterns" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.439, + 0.461, + 0.447 + ], + "angle": 0, + "content": "- When mentally folded, the patterns don't seem to match the target cube." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.423, + 0.461, + 0.447 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.454, + 0.243, + 0.462 + ], + "angle": 0, + "content": "For option (D):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.462, + 0.287, + 0.47 + ], + "angle": 0, + "content": "This unfolded pattern has:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.47, + 0.442, + 0.478 + ], + "angle": 0, + "content": "- A cross-like shape with triangular patterns in different orientations" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.478, + 0.466, + 0.485 + ], + "angle": 0, + "content": "- When mentally folded, I need to check if this could create the target cube." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.462, + 0.466, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.492, + 0.698, + 0.501 + ], + "angle": 0, + "content": "Let me think about this more methodically. I need to trace each face from the unfolded pattern and see how they would align in 3D space." 
+ }, + { + "type": "title", + "bbox": [ + 0.187, + 0.508, + 0.326, + 0.516 + ], + "angle": 0, + "content": "Looking at option (B) more carefully:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.517, + 0.556, + 0.524 + ], + "angle": 0, + "content": "If I consider the cross-shape as the standard cube net, with the center becoming the front face, then:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.524, + 0.594, + 0.532 + ], + "angle": 0, + "content": "- The center face has a triangular pattern pointing downward, which matches the front face of the target cube" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.532, + 0.729, + 0.54 + ], + "angle": 0, + "content": "- The face to the right of center would become the right face, and it has a triangular pattern that would point left when folded, matching the target" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.54, + 0.715, + 0.548 + ], + "angle": 0, + "content": "- The face above the center would become the top face, and it has a triangular pattern that would point right when folded, matching the target" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.548, + 0.419, + 0.555 + ], + "angle": 0, + "content": "This seems to be a match for the folded cube shown at the top." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.517, + 0.729, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.562, + 0.812, + 0.579 + ], + "angle": 0, + "content": "Let me double-check option (D): The arrangement of the triangular patterns doesn't seem to match what we see in the folded cube, particularly the orientation of the triangles." + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.586, + 0.332, + 0.594 + ], + "angle": 0, + "content": "I think option (B) is the correct answer." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.595, + 0.248, + 0.602 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.602, + 0.419, + 0.61 + ], + "angle": 0, + "content": "Analyzing Which Unfolded Pattern Creates the Target Cube" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.617, + 0.675, + 0.626 + ], + "angle": 0, + "content": "I need to determine which of the unfolded cube patterns (a, b, c, d) would create the folded cube shown at the top when assembled." 
+ }, + { + "type": "title", + "bbox": [ + 0.187, + 0.632, + 0.303, + 0.641 + ], + "angle": 0, + "content": "Examining the Target Cube" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.642, + 0.376, + 0.649 + ], + "angle": 0, + "content": "First, I'll identify what's visible on the folded cube:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.649, + 0.423, + 0.657 + ], + "angle": 0, + "content": "- Front face: Has a black triangular pattern pointing downward" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.658, + 0.419, + 0.666 + ], + "angle": 0, + "content": "- Top face: Has a black triangular pattern pointing to the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.666, + 0.418, + 0.673 + ], + "angle": 0, + "content": "- Right face: Has a black triangular pattern pointing to the left" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.642, + 0.423, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.679, + 0.327, + 0.688 + ], + "angle": 0, + "content": "Analyzing Each Unfolded Pattern" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.695, + 0.812, + 0.712 + ], + "angle": 0, + "content": "Option (a): When folded, the triangular patterns wouldn't match the configuration in the target cube. The orientations and positions of the triangles don't align correctly." + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.718, + 0.358, + 0.726 + ], + "angle": 0, + "content": "Option (b): If I mentally fold this pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.727, + 0.473, + 0.734 + ], + "angle": 0, + "content": "- The central face (with downward-pointing triangle) would be the front face" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.735, + 0.488, + 0.742 + ], + "angle": 0, + "content": "- The face above it (with rightward-pointing triangle) would become the top face" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.743, + 0.534, + 0.75 + ], + "angle": 0, + "content": "- The face to the right of center (with leftward-pointing triangle) would become the right face" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.75, + 0.399, + 0.758 + ], + "angle": 0, + "content": "This perfectly matches the pattern seen in the target cube." + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.727, + 0.534, + 0.758 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.765, + 0.812, + 0.781 + ], + "angle": 0, + "content": "Option (c): The arrangement of faces and triangular patterns in this unfolded net wouldn't create the target cube. The orientations of the triangles don't match when folded." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.788, + 0.647, + 0.797 + ], + "angle": 0, + "content": "Option (d): While this has triangular patterns, when folded the orientations don't match what we see in the target cube." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.803, + 0.546, + 0.813 + ], + "angle": 0, + "content": "Therefore, option (b) is the only unfolded pattern that would create the target cube when folded." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.819, + 0.228, + 0.827 + ], + "angle": 0, + "content": "Answer: B" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.833, + 0.44, + 0.845 + ], + "angle": 0, + "content": "Error Category: Spatial / Orientation Error" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.846, + 0.812, + 0.893 + ], + "angle": 0, + "content": "Error Reason: The model incorrectly visualizes how the triangular patterns on the unfolded net align when folded into a cube. It selects option B, mistakenly believing the triangle orientations will match the folded view, but fails to correctly account for how the triangles rotate and face different directions when assembled." + }, + { + "type": "image_caption", + "bbox": [ + 0.233, + 0.913, + 0.763, + 0.93 + ], + "angle": 0, + "content": "Figure 40: A sample error case of Spatial Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "53" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.425, + 0.113, + 0.564, + 0.129 + ], + "angle": 0, + "content": "Spatial (Medium)" + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.136, + 0.416, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.136, + 0.477, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.136, + 0.54, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.555, + 0.136, + 0.582, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.136, + 0.62, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.194, + 0.812, + 0.243 + ], + "angle": 0, + "content": "Question: Paulo took a rectangular sheet of paper, yellow on one side and green on the other side and, with several folds shown in the dotted lines in the figure below, made a little paper plane. To give the airplane a charm, Paulo made a circular hole, marked on the last figure. After playing a lot with the plane, Paulo unfolded the sheet and realized that there were several holes in it. How many holes did he count?" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.243, + 0.359, + 0.255 + ], + "angle": 0, + "content": "Options: A: 6. B: 7. C: 8. D: 9" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.258, + 0.298, + 0.271 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.274, + 0.5, + 0.285 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze this problem step by step." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.29, + 0.809, + 0.308 + ], + "angle": 0, + "content": "We're looking at a sheet of paper that is folded in several steps to make a paper plane, and then a circular hole is punched through the folded paper. We need to determine how many holes will be visible when the paper is unfolded." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.313, + 0.286, + 0.322 + ], + "angle": 0, + "content": "From the image, I can see:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.323, + 0.478, + 0.331 + ], + "angle": 0, + "content": "1. 
A rectangular yellow sheet of paper (yellow on one side, green on the other)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.331, + 0.397, + 0.338 + ], + "angle": 0, + "content": "2. The paper is folded along dotted lines in several steps" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.338, + 0.486, + 0.346 + ], + "angle": 0, + "content": "3. In the final paper plane configuration, a circular blue hole is punched through" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.323, + 0.486, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.352, + 0.81, + 0.37 + ], + "angle": 0, + "content": "When a hole is punched through multiple layers of folded paper, it creates holes in each layer. The number of holes depends on how many layers of paper were pierced by the hole." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.376, + 0.304, + 0.386 + ], + "angle": 0, + "content": "Looking at the folding process:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.392, + 0.546, + 0.401 + ], + "angle": 0, + "content": "- Second image: Top portion folded to create a triangular shape (green side now showing on top)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.401, + 0.402, + 0.408 + ], + "angle": 0, + "content": "- Third image: Side portions folded (more green showing)" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.408, + 0.306, + 0.416 + ], + "angle": 0, + "content": "- Fourth image: Further folding" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.416, + 0.397, + 0.424 + ], + "angle": 0, + "content": "- Fifth image: Final paper plane with a blue circular hole" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.392, + 0.546, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.43, + 0.768, + 0.44 + ], + "angle": 0, + "content": "To determine the number of holes, I need to figure out how many layers of paper were at the location where the hole was punched in the final configuration." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.445, + 0.569, + 0.455 + ], + "angle": 0, + "content": "It's a bit difficult to precisely determine from just these diagrams, but I can make a reasonable analysis:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.461, + 0.811, + 0.479 + ], + "angle": 0, + "content": "In a typical paper airplane, the center fold creates 2 layers, and then additional folds along the sides can create more layers. The hole appears to be punched near the tip of one wing." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.485, + 0.81, + 0.503 + ], + "angle": 0, + "content": "When we fold paper, the hole punched through multiple layers will create a hole in each layer. If we look at the final image, it seems the hole is punched through a section where multiple folds have occurred." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.507, + 0.733, + 0.518 + ], + "angle": 0, + "content": "Based on the folding pattern shown, the paper appears to have been folded in a way that would result in multiple layers at the location of the hole." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.523, + 0.81, + 0.542 + ], + "angle": 0, + "content": "Without seeing exactly how many layers overlap at the punch point, it's hard to give a definitive answer. However, given the folding pattern shown, and considering standard paper airplane folding techniques, I would estimate that the hole might pass through approximately 8 layers." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.546, + 0.636, + 0.557 + ], + "angle": 0, + "content": "However, since this is a multiple-choice question with specific options, I should choose among (A) 6, (B) 7, (C) 8, or (D) 9." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.562, + 0.811, + 0.58 + ], + "angle": 0, + "content": "Given the complexity of the folds and the location of the hole in the final plane, and looking at the options, I believe option (C) 8 is the most reasonable answer. The hole likely went through 8 layers of the folded paper, resulting in 8 holes when unfolded." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.585, + 0.776, + 0.595 + ], + "angle": 0, + "content": "Actual, let me think about this more carefully. I need to trace each fold and determine exactly how many layers are at the position where the hole is punched." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.602, + 0.52, + 0.611 + ], + "angle": 0, + "content": "The hole appears to be near one tip of the final paper plane. Based on the folding pattern:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.611, + 0.405, + 0.619 + ], + "angle": 0, + "content": "1. The initial fold creates a triangle on top (1 fold, 2 layers)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.619, + 0.321, + 0.626 + ], + "angle": 0, + "content": "2. Then side folds create more layers" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.626, + 0.353, + 0.634 + ], + "angle": 0, + "content": "3. The final folds create the completed plane" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.611, + 0.405, + 0.634 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.64, + 0.729, + 0.65 + ], + "angle": 0, + "content": "Typical paper airplanes can have different numbers of layers at different points. The tip might have 8 layers if several folds pass through that area." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.656, + 0.558, + 0.665 + ], + "angle": 0, + "content": "Given the options and the standard folding patterns for paper planes, I'll go with option (C) 8 holes." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.665, + 0.249, + 0.672 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.672, + 0.34, + 0.681 + ], + "angle": 0, + "content": "Counting Holes in the Unfolded Paper" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.687, + 0.691, + 0.697 + ], + "angle": 0, + "content": "I need to determine how many holes appear when Paulo unfolds his paper plane after making a single circular hole in the folded plane." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.703, + 0.315, + 0.712 + ], + "angle": 0, + "content": "Analyzing the Folding Process" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.718, + 0.306, + 0.726 + ], + "angle": 0, + "content": "Looking at the image sequence:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.726, + 0.513, + 0.735 + ], + "angle": 0, + "content": "1. We start with a flat rectangular sheet that's yellow on one side and green on the other" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.735, + 0.454, + 0.742 + ], + "angle": 0, + "content": "2. The paper goes through several folding steps (shown by dotted lines)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.742, + 0.516, + 0.751 + ], + "angle": 0, + "content": "3. 
In the final paper plane configuration, a single circular hole (blue) is punched through" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.718, + 0.516, + 0.751 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.757, + 0.308, + 0.767 + ], + "angle": 0, + "content": "Why Multiple Holes Appear" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.772, + 0.81, + 0.79 + ], + "angle": 0, + "content": "When a hole is punched through multiple layers of folded paper, it creates a hole in each layer. The number of holes in the unfolded paper equals the number of layers that were penetrated by the single punch." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.796, + 0.278, + 0.806 + ], + "angle": 0, + "content": "## Counting the Layers" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.812, + 0.447, + 0.821 + ], + "angle": 0, + "content": "Following the folding process carefully and looking at the final plane:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.821, + 0.383, + 0.828 + ], + "angle": 0, + "content": "- The initial rectangular sheet is folded several times" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.828, + 0.362, + 0.836 + ], + "angle": 0, + "content": "- Each fold creates multiple overlapping layers" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.836, + 0.486, + 0.844 + ], + "angle": 0, + "content": "- The hole appears to be punched through the right wing of the completed plane" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.821, + 0.486, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.85, + 0.811, + 0.868 + ], + "angle": 0, + "content": "Based on the specific folding pattern shown in the diagrams, when the sheet is completely folded into the paper plane configuration, the location where the hole is punched consists of 8 layers of paper stacked together." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.873, + 0.696, + 0.883 + ], + "angle": 0, + "content": "Therefore, when Paulo unfolds the paper, he would find 8 holes in the sheet - one for each layer that was penetrated by the single punch." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.889, + 0.228, + 0.898 + ], + "angle": 0, + "content": "Answer: C" + }, + { + "type": "image_caption", + "bbox": [ + 0.226, + 0.913, + 0.77, + 0.93 + ], + "angle": 0, + "content": "Figure 41: A sample correct case of Spatial Reasoning (difficulty: Medium)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "54" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.439, + 0.113, + 0.551, + 0.129 + ], + "angle": 0, + "content": "Spatial (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.141, + 0.379, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.39, + 0.145, + 0.707, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.198, + 0.811, + 0.223 + ], + "angle": 0, + "content": "Question: Betty walked around the park once, starting from the marked point in the direction of the arrow. She took 4 pictures. In which order did she take the pictures?" 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.224, + 0.434, + 0.236 + ], + "angle": 0, + "content": "Options: A: 1234. B: 1324. C: 2143. D: 2341" + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.242, + 0.298, + 0.255 + ], + "angle": 0, + "content": "Ground Truth: (C)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.262, + 0.812, + 0.282 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: This problem requires me to determine the order in which Betty took 4 pictures as she walked around a park. I need to look at the diagram and figure out the sequence." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.289, + 0.571, + 0.299 + ], + "angle": 0, + "content": "First, I'll analyze what I see in the leftmost diagram, which seems to show the park layout:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.299, + 0.429, + 0.308 + ], + "angle": 0, + "content": "- There's a cylindrical object (like a column or tree trunk)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.308, + 0.447, + 0.317 + ], + "angle": 0, + "content": "- There's a cone-shaped object (like a pine tree or monument)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.317, + 0.425, + 0.326 + ], + "angle": 0, + "content": "- There's a dome-shaped object (like a small hill or rock)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.326, + 0.425, + 0.334 + ], + "angle": 0, + "content": "- There's an arrow indicating the direction Betty walked" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.299, + 0.447, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.343, + 0.561, + 0.353 + ], + "angle": 0, + "content": "Then I have 4 numbered pictures, each showing different arrangements of these objects:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.353, + 0.503, + 0.361 + ], + "angle": 0, + "content": "- Picture 1: Shows the dome shape on the left and the cylinder on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.361, + 0.499, + 0.37 + ], + "angle": 0, + "content": "- Picture 2: Shows the cone shape on the left and the cylinder on the right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.37, + 0.502, + 0.379 + ], + "angle": 0, + "content": "- Picture 3: Shows the cylinder on the left and the dome shape on the right." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.379, + 0.593, + 0.388 + ], + "angle": 0, + "content": "- Picture 4: Shows the cylinder on the left and both the dome shape and cone shape on the right" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.343, + 0.593, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.396, + 0.723, + 0.406 + ], + "angle": 0, + "content": "I need to determine the order in which Betty would see these views as she walks around the park in the direction of the arrow." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.414, + 0.607, + 0.424 + ], + "angle": 0, + "content": "Looking at the main diagram, the arrow indicates she's walking counterclockwise around the park." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.432, + 0.559, + 0.441 + ], + "angle": 0, + "content": "If I start at the marked point and move in the direction of the arrow (counterclockwise):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.442, + 0.598, + 0.45 + ], + "angle": 0, + "content": "1. 
Betty would first see the dome on her left and the cylinder on her right - this matches Picture 1" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.45, + 0.74, + 0.458 + ], + "angle": 0, + "content": "2. As she continues counterclockwise, she would next see the cone on her left and the cylinder on her right - this matches Picture 2" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.459, + 0.656, + 0.467 + ], + "angle": 0, + "content": "3. Continuing further, she would see the cylinder on her left and the dome on her right - this matches Picture 3" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.468, + 0.763, + 0.476 + ], + "angle": 0, + "content": "4. Finally, completing the circle, she would see the cylinder on her left with both the dome and cone on her right - this matches Picture 4" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.442, + 0.763, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.485, + 0.548, + 0.495 + ], + "angle": 0, + "content": "So the order of the pictures would be 1, 2, 3, 4, which corresponds to option (A) 1234." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.503, + 0.321, + 0.513 + ], + "angle": 0, + "content": "Let me double-check this logic:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.521, + 0.432, + 0.53 + ], + "angle": 0, + "content": "From the starting position and moving counterclockwise:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.53, + 0.418, + 0.539 + ], + "angle": 0, + "content": "- First view: dome on left, cylinder on right (Picture 1)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.54, + 0.424, + 0.549 + ], + "angle": 0, + "content": "- Second view: cone on left, cylinder on right (Picture 2)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.55, + 0.414, + 0.558 + ], + "angle": 0, + "content": "- Third view: cylinder on left, dome on right (Picture 1)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.558, + 0.468, + 0.566 + ], + "angle": 0, + "content": "- Fourth view: cylinder on left, dome and cone on right (Picture 4)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.574, + 0.364, + 0.583 + ], + "angle": 0, + "content": "This confirms that the answer is (A) 1234." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.584, + 0.259, + 0.592 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.592, + 0.4, + 0.601 + ], + "angle": 0, + "content": "# Determining Picture Order in Betty's Park Walk" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.609, + 0.717, + 0.619 + ], + "angle": 0, + "content": "I need to figure out the order in which Betty took 4 pictures as she walked once around the park in the direction of the arrow." 
+ }, + { + "type": "text", + "bbox": [ + 0.186, + 0.628, + 0.318, + 0.636 + ], + "angle": 0, + "content": "Analyzing the Park Layout" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.637, + 0.328, + 0.645 + ], + "angle": 0, + "content": "From the first diagram, I can see:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.646, + 0.415, + 0.655 + ], + "angle": 0, + "content": "- A cylindrical object (looks like a column/tree trunk)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.655, + 0.43, + 0.664 + ], + "angle": 0, + "content": "- A cone-shaped object (like a pointed tree or monument)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.664, + 0.394, + 0.672 + ], + "angle": 0, + "content": "- A dome-shaped object (like a small hill or rock)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.672, + 0.404, + 0.681 + ], + "angle": 0, + "content": "- An arrow indicating counterclockwise movement" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.646, + 0.43, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.69, + 0.304, + 0.698 + ], + "angle": 0, + "content": "Analyzing Each Picture" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.699, + 0.496, + 0.707 + ], + "angle": 0, + "content": "1. **Picture 1**: Shows the dome shape on left, cylindrical object on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.708, + 0.492, + 0.716 + ], + "angle": 0, + "content": "2. **Picture 2**: Shows the cone shape on left, cylindrical object on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.717, + 0.496, + 0.725 + ], + "angle": 0, + "content": "3. **Picture 3**: Shows the cylindrical object on left, dome shape on right" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.725, + 0.562, + 0.734 + ], + "angle": 0, + "content": "4. **Picture 4**: Shows the cylindrical object on left, both dome and cone shapes on right" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.699, + 0.562, + 0.734 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.743, + 0.316, + 0.752 + ], + "angle": 0, + "content": "Determining the Sequence" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.753, + 0.546, + 0.761 + ], + "angle": 0, + "content": "Starting from the marked point and moving counterclockwise (following the arrow):" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.762, + 0.569, + 0.769 + ], + "angle": 0, + "content": "1. First, Betty would see the dome on her left and the cylinder on her right \\(\\rightarrow^{**}\\) Picture \\(1^{**}\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.77, + 0.682, + 0.778 + ], + "angle": 0, + "content": "2. Continuing counterclockwise, she would next see the cone on her left and the cylinder on her right \\(\\rightarrow^{**}\\) Picture \\(2^{**}\\)" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.779, + 0.607, + 0.787 + ], + "angle": 0, + "content": "3. Moving further, she would see the cylinder on her left and the dome on her right \\(\\rightarrow^{**}\\) Picture 3\\*\\*" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.788, + 0.636, + 0.796 + ], + "angle": 0, + "content": "4. 
Finally, she would see the cylinder on her left with both the dome and cone on her right \\(\\rightarrow^{**}\\) Picture \\(4^{**}\\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.753, + 0.682, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.805, + 0.334, + 0.815 + ], + "angle": 0, + "content": "This gives us the sequence 1-2-3-4." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.823, + 0.235, + 0.832 + ], + "angle": 0, + "content": "Answer: A" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.837, + 0.44, + 0.849 + ], + "angle": 0, + "content": "Error Category: Spatial / Orientation Error" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.85, + 0.812, + 0.899 + ], + "angle": 0, + "content": "Error Reason: The model makes a spatial orientation error by incorrectly interpreting the shifting perspectives as Betty walks around the park, especially at the starting point. It assumes a consistent viewpoint but fails to mentally rotate the scene to match what Betty would actually see from each new position." + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.913, + 0.751, + 0.93 + ], + "angle": 0, + "content": "Figure 42: A sample error case of Spatial Reasoning (difficulty: Hard)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "55" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Submission In Progress" + }, + { + "type": "title", + "bbox": [ + 0.439, + 0.113, + 0.551, + 0.129 + ], + "angle": 0, + "content": "Spatial (Hard)" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.113, + 0.613, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.146, + 0.253, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.18, + 0.225, + 0.186 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.141, + 0.328, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.297, + 0.179, + 0.305, + 0.186 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.141, + 0.396, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.365, + 0.179, + 0.373, + 0.186 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.416, + 0.141, + 0.469, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.441, + 0.179, + 0.449, + 0.186 + ], + "angle": 0, + "content": "D" + }, + { + "type": "text", + "bbox": [ + 0.48, + 0.142, + 0.812, + 0.168 + ], + "angle": 0, + "content": "Question: From the flat expansion view of paper boxes, which paper box is different from the other three?" + }, + { + "type": "text", + "bbox": [ + 0.48, + 0.176, + 0.593, + 0.19 + ], + "angle": 0, + "content": "Ground Truth: (A)" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.203, + 0.812, + 0.222 + ], + "angle": 0, + "content": "Claude-3.7-Sonnet-Thinking: This problem is asking me to identify which one of the four flat expansions of paper boxes is different from the others. Let me analyze each one carefully." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.229, + 0.81, + 0.248 + ], + "angle": 0, + "content": "These patterns represent unfolded 3D boxes (nets), where the squares would fold to create a 3D shape. 
The black squares and circles represent some kind of marking or pattern on the surface of the box." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.256, + 0.391, + 0.266 + ], + "angle": 0, + "content": "Let me analyze the structure of each expansion:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.266, + 0.741, + 0.275 + ], + "angle": 0, + "content": "A: This appears to be a linear arrangement of 5 squares with a T-shape extension. The pattern has black regions in various squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.275, + 0.69, + 0.284 + ], + "angle": 0, + "content": "B: This is a more complex shape with 7 squares arranged in a cross-like pattern. It has black regions in various squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.284, + 0.616, + 0.293 + ], + "angle": 0, + "content": "C: This also appears to be an arrangement of squares that could fold into a box, with black markings." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.293, + 0.516, + 0.302 + ], + "angle": 0, + "content": "D: This is another cross-shaped arrangement of squares with black markings." + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.266, + 0.741, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.309, + 0.763, + 0.32 + ], + "angle": 0, + "content": "To determine which one is different, I need to see if they would all fold into valid 3D boxes and if there's any pattern that sets one apart." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.328, + 0.368, + 0.337 + ], + "angle": 0, + "content": "Let me think about how these would fold:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.337, + 0.461, + 0.346 + ], + "angle": 0, + "content": "- A cube has 6 faces, so a valid net should have exactly 6 squares" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.346, + 0.497, + 0.355 + ], + "angle": 0, + "content": "- The squares must be connected in a way that allows folding into a cube" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.328, + 0.497, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.363, + 0.33, + 0.372 + ], + "angle": 0, + "content": "Let me count the squares in each:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.372, + 0.272, + 0.381 + ], + "angle": 0, + "content": "A: I count 5 squares" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.381, + 0.272, + 0.39 + ], + "angle": 0, + "content": "B: I count 7 squares" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.39, + 0.272, + 0.4 + ], + "angle": 0, + "content": "C: I count 7 squares" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.4, + 0.272, + 0.409 + ], + "angle": 0, + "content": "D:1 count 7 squares" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.372, + 0.272, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.416, + 0.81, + 0.436 + ], + "angle": 0, + "content": "Looking at the number of squares, A has 5 squares while B, C, and D each have 7 squares. A cube needs 6 faces, so none of these would fold into a perfect cube. A is different in that it has fewer squares than the others." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.442, + 0.563, + 0.453 + ], + "angle": 0, + "content": "However, let me look more carefully at these patterns to see if there's another difference." 
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.461, + 0.293, + 0.47 + ], + "angle": 0, + "content": "Actually, let me recount:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.47, + 0.543, + 0.48 + ], + "angle": 0, + "content": "A: Looking more carefully, I see 6 squares arranged in a T-pattern with an extension" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.48, + 0.446, + 0.489 + ], + "angle": 0, + "content": "B: I see 7 squares arranged in a cross-pattern with extensions" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.489, + 0.354, + 0.498 + ], + "angle": 0, + "content": "C: I see 7 squares arranged in a pattern" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.498, + 0.379, + 0.507 + ], + "angle": 0, + "content": "D: I see 7 squares arranged in a cross-pattern" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.47, + 0.543, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.514, + 0.81, + 0.534 + ], + "angle": 0, + "content": "So option A has 6 squares, while B, C, and D each have 7 squares. Since a cube has 6 faces, A is the only one that could potentially fold into a proper cube. The others have an extra square." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.541, + 0.334, + 0.55 + ], + "angle": 0, + "content": "Let me check once more carefully:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.55, + 0.573, + 0.559 + ], + "angle": 0, + "content": "A: I count squares from left to right and top to bottom: 1, 2, 3, 4, 5, 6. So there are 6 squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.56, + 0.272, + 0.568 + ], + "angle": 0, + "content": "B: I count 7 squares" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.568, + 0.274, + 0.578 + ], + "angle": 0, + "content": "C: I count 7 squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.578, + 0.274, + 0.586 + ], + "angle": 0, + "content": "D: I count 7 squares." + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.55, + 0.573, + 0.586 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.594, + 0.704, + 0.605 + ], + "angle": 0, + "content": "So based on the number of squares, A is different from the others because it has 6 squares while the others have 7 squares." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.611, + 0.81, + 0.631 + ], + "angle": 0, + "content": "But let me think about whether there could be another difference that's more subtle, like the pattern of black regions or the connectivity of the squares." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.638, + 0.714, + 0.659 + ], + "angle": 0, + "content": "Looking at the patterns of black and white regions, they all seem to have black regions and circles in different arrangements. Looking at the connectivity of the squares, they all have different patterns of how the squares connect." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.665, + 0.809, + 0.685 + ], + "angle": 0, + "content": "I think the key difference is the number of squares. A has 6 squares which is the exact number needed for a cube. B, C, and D all have 7 squares, which is one too many for a standard cube - these would need to have one square overlapping or not used when folded." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.692, + 0.27, + 0.701 + ], + "angle": 0, + "content": "So my answer is A." 
+ }, + { + "type": "text", + "bbox": [ + 0.185, + 0.701, + 0.256, + 0.71 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.71, + 0.36, + 0.72 + ], + "angle": 0, + "content": "Analysis of Paper Box Flat Expansions" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.727, + 0.733, + 0.738 + ], + "angle": 0, + "content": "I need to determine which of these flat expansions (nets) is different from the others by examining their structure and properties." + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.746, + 0.428, + 0.755 + ], + "angle": 0, + "content": "First, let me count the number of squares in each option:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.755, + 0.456, + 0.765 + ], + "angle": 0, + "content": "- Option A: 6 squares arranged in an L-shape with an extension" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.765, + 0.461, + 0.774 + ], + "angle": 0, + "content": "- Option B: 7 squares arranged in a cross pattern with extensions" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.774, + 0.442, + 0.782 + ], + "angle": 0, + "content": "- Option C: 7 squares arranged in a T-shape with extensions" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.782, + 0.463, + 0.791 + ], + "angle": 0, + "content": "- Option D: 7 squares arranged in a cross pattern with extensions" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.746, + 0.463, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.798, + 0.721, + 0.809 + ], + "angle": 0, + "content": "The key difference I notice is that option A has exactly 6 squares, while all the other options (B, C, and D) have 7 squares each." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.816, + 0.81, + 0.835 + ], + "angle": 0, + "content": "This is significant because a cube has exactly 6 faces, so a proper net for a cube should have exactly 6 squares. Options B, C, and D all have one extra square, which means they would either:" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.835, + 0.316, + 0.844 + ], + "angle": 0, + "content": "1. Not fold into a proper cube" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.844, + 0.375, + 0.853 + ], + "angle": 0, + "content": "2. Have an overlapping square when folded" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.853, + 0.348, + 0.862 + ], + "angle": 0, + "content": "3. Require leaving one square unused" + }, + { + "type": "list", + "bbox": [ + 0.185, + 0.835, + 0.375, + 0.862 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.869, + 0.781, + 0.88 + ], + "angle": 0, + "content": "Therefore, option A is different from the other three because it's the only one with the correct number of squares (6) to form a standard cube." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.888, + 0.235, + 0.897 + ], + "angle": 0, + "content": "Answer: A" + }, + { + "type": "image_caption", + "bbox": [ + 0.239, + 0.913, + 0.757, + 0.93 + ], + "angle": 0, + "content": "Figure 43: A sample correct case of Spatial Reasoning (difficulty: Hard)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "56" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_origin.pdf b/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..34e7a942d26924a46ae19132964f6ab219a6fcec --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/0ae2a017-8bf2-4c96-bb86-70cf5464648b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e29cdab17cf32c8a0b8ea3ba4c229766360fc7e849aa6b32fa055c88bd97820 +size 5180174 diff --git a/data/2025/2504_10xxx/2504.10342/full.md b/data/2025/2504_10xxx/2504.10342/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d0580d72eca41bbb981546c6f3451bed16b11dc1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/full.md @@ -0,0 +1,3584 @@ +# VISUALPUZZLES: Decoupling Multimodal Reasoning Evaluation from Domain Knowledge + +Yueqi Song\*, Tianyue Ou\*, Yibo Kong†, Zecheng Li†, Graham Neubig, Xiang Yue {yueqis, tianyueo, gneubig, xyue2}@cs.cmu.edu + +Carnegie Mellon University + +https://neulab.github.io/VisualPuzzles/ + +# Abstract + +Current multimodal benchmarks often conflate reasoning with domain-specific knowledge, making it difficult to isolate and evaluate general reasoning abilities in non-expert settings. To address this, we introduce VISUALPUZZLES, a benchmark that targets visual reasoning while deliberately minimizing reliance on specialized knowledge. VISUALPUZZLES consists of diverse questions spanning five categories: algorithmic, analogical, deductive, inductive, and spatial reasoning. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination. Experiments show that VISUALPUZZLES requires significantly less intensive domain-specific knowledge and more complex reasoning compared to benchmarks like MMMU, enabling us to better evaluate genuine multimodal reasoning. Evaluations show that state-of-the-art multimodal large language models consistently lag behind human performance on VISUALPUZZLES, and that strong performance on knowledge-intensive benchmarks does not necessarily translate to success on reasoning-focused, knowledge-light tasks. Additionally, reasoning enhancements such as scaling up inference compute (with "thinking" modes) yield inconsistent gains across models and task types, and we observe no clear correlation between model size and performance. We also found that models exhibit different reasoning and answering patterns on VISUALPUZZLES compared to benchmarks with heavier emphasis on knowledge. VISUALPUZZLES offers a clearer lens through which to evaluate reasoning capabilities beyond factual recall and domain knowledge. + +![](images/b49fbe7e0699eadfaf858d3fd9a5d59db1d54bb1ba3708e26d31d65d8c714c0e.jpg) +Figure 1: Model accuracy on VISUALPUZZLES compared to human performance percentiles. All evaluated models fall below the human 5th percentile (57.5%), highlighting the difficulty of VISUALPUZZLES. Interestingly, models with explicit "thinking" modes do not consistently outperform their base versions, suggesting that current reasoning strategies do not yet generalize well to VISUALPUZZLES's scenarios, even though these strategies have proven effective in existing reasoning tasks that often rely heavily on domain-specific knowledge. 
+ +![](images/92bbb61a0dc500ca4cc6ff7ae9e4bf125395e05e5543c7fc02bd64ec2111821d.jpg) + +![](images/1ed8ee9704d40204c4a09619e1b5a1f5ac09cbf72757ce1849a071e4e1de96c6.jpg) + +![](images/78312d3960e121d282f214dae295f027c4d2a797ed5d6d36b898b32f29426c95.jpg) + +![](images/d6fcb4af5124da5e912611d8cc6c3d5089127f9bcd51a49bbc48a62769953913.jpg) +Figure 2: Example VISUALPUZZLES instances within each reasoning category + +![](images/bf86655e1940e3c1363d3ce9ef3b6abf9cd477dffada06d559f29bcafaa6f316.jpg) + +# 1 Introduction + +Reasoning is a cornerstone of both human and artificial intelligence, enabling systems to solve problems, draw inferences, and make decisions from information. Recent advances in multimodal large language models (MLLMs) (OpenAI, 2024; Liu et al., 2023a; Li et al., 2024; Dubey et al., 2024; Qwen Team, 2025a; Yue et al., 2025) exhibit early signs of reasoning in tackling complex tasks such as answering expert-level visual questions (Yue et al., 2024a;b), interpreting scientific diagrams (Roberts et al., 2024), and solving challenging math word problems (Lu et al., 2023). + +Many of the tasks mentioned above are inherently knowledge-intensive; large amounts of knowledge in domains such as science or math are necessary to answer questions correctly (Yue et al., 2024a). However, in reality, reasoning does not necessitate knowledge. Even non-expert humans can successfully solve logic puzzles, spatial reasoning problems, and analogical tasks using general inferential skills, without requiring deep domain expertise. This raises an important question: Can we measure MLLMs's reasoning ability independently of measuring their acquisition of domain-specific knowledge? This question is particularly important with the recent rapid development of reasoning models in the textual domain (Jaech et al., 2024; DeepSeek-AI, 2025; Qwen Team, 2025b), and emerging application to the visual domain (Qwen Team, 2024). + +To address this question, we introduce VISUALPUZZLES, a multimodal benchmark explicitly crafted to assess reasoning capabilities independent of specialized knowledge. VISUALPUZZLES comprises 1,168 carefully curated puzzle-like questions that span five distinct categories of reasoning: algorithmic, analogical, deductive, inductive, and spatial, each annotated with varying difficulty levels. VISUALPUZZLES only requires basic common knowledge and the information presented in the question to solve problems, disentangling reasoning from domain-specific knowledge recall. Our experiments show that VISUALPUZZLES requires significantly fewer domain-specific knowledge concepts compared to benchmarks like MMMU, and models have sufficient knowledge required to solve VISUALPUZZLES questions, enabling us to better assess multimodal reasoning versus pretrained factual knowledge. While VISUALPUZZLES minimizes reliance on domain expertise, its reasoning complexity exceeds that of existing benchmarks: in VISUALPUZZLES, $82.1\%$ of models' solution steps are logical reasoning steps, compared to $71.5\%$ in MMMU. Additionally, no current MLLM surpasses even the 5th-percentile human performance, highlighting the benchmark's difficulty and the limitations of today's models in general-purpose visual reasoning. + +Our experiments with VISUALPUZZLES reveal critical limitations in current MLLMs' multimodal reasoning ability by factoring out domain-specific knowledge requirements and only focusing on reasoning. 
Specifically, we uncover four key findings: + +- Strong performance on knowledge-heavy benchmarks does not transfer well. Models that rank highly on MathVista and MMMU often experience substantial performance drops on VISUALPUZZLES, highlighting a disconnect between knowledge-rich and knowledge-light multimodal reasoning tasks. +- Humans outperform models on easy and medium tasks, while both degrade on harder ones. Human participants show strong and consistent performance on easy and medium-level questions across reasoning categories. In contrast, models struggle even on simpler tasks. +- Reasoning enhancements (e.g., long CoT and "thinking" mode) yield inconsistent gains. While explicit reasoning strategies help certain models tackle complex reasoning tasks, these techniques do not consistently improve performance across all model families and task types. +- Scaling model size does not ensure stronger reasoning. We observe no clear trend indicating that larger models outperform smaller ones on VISUALPUZZLES, suggesting that scaling up parameters alone is insufficient to improve domain-agnostic multimodal reasoning. + +# 2 VISUALPUZZLES + +# 2.1 Motivation and Design Principles of VISUALPUZZLES + +Existing benchmarks often conflate multimodal reasoning with domain-specific knowledge, making it difficult to isolate and measure the pure reasoning capabilities of these models. + +VISUALPUZZLES is designed to explicitly address this issue by providing a testbed focused on evaluating multimodal reasoning in isolation from specialized knowledge. Specifically, VISUALPUZZLES centers on puzzle-like questions that rely solely on the provided image, question text, and basic common-sense reasoning. The core design principle behind VISUALPUZZLES is to limit the need for external or pretrained domain knowledge. Figure 2 shows examples of VISUALPUZZLES within each reasoning category. + +# 2.2 Data Collection and Curation + +We curated VISUALPUZZLES using a multi-stage pipeline. The process involved sourcing, adapting, and validating questions with an emphasis on reasoning quality and minimal reliance on specialized knowledge. + +Question Sourcing. We collected questions from three primary sources: (1) online resources and textbooks focused on logical, visual, and spatial puzzles, (2) synthesized items using images from large-scale vision datasets paired with text prompts, and (3) carefully repurposed items from existing multimodal reasoning benchmarks. Each source was selected to ensure a wide variety of reasoning challenges while avoiding trivial or fact-heavy questions. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination1. Other sources are listed in Appendix A. + +Format Adaptation. All collected items were adapted into a consistent multiple-choice format with four options, balancing between text-based and image-based answer choices. This modality balance allows us to better test models' abilities to perform reasoning across diverse formats. + +Data Validation. During curation, we applied strict filtering criteria to eliminate questions requiring advanced mathematical knowledge, specialized domain knowledge and facts. Questions were retained only if they could be solved using information present in the image, + +the question prompt, and basic common sense. A multi-round validation process was conducted by human annotators, focusing on question clarity, solvability, and reasoning type classification. + +Attribute Annotation. 
Finally, each question was annotated with two key attributes: + +- Reasoning Category: Each item was categorized as algorithmic, analogical, deductive, inductive, or spatial reasoning. These five categories were selected as they represent fundamental forms of reasoning widely discussed in literature (Liu et al., 2020; Lu et al., 2023; Yue et al., 2024a; Gao et al., 2023). At the same time, we aimed to balance comprehensiveness with conciseness, avoiding an overly fine-grained taxonomy that could dilute the benchmark's clarity and usability. This categorization ensures that VISUALPUZZLES covers a broad yet manageable set of reasoning skills relevant to multimodal LLM evaluation. + +- Algorithmic Reasoning involves reasoning over algorithmic rules. +- Analogical Reasoning requires analyzing the relationships between a pair of entities. +- Deductive Reasoning involves logically drawing conclusions from known premises. +- Inductive Reasoning focuses on generalizing rules from observed patterns. +- Spatial Reasoning requires interpreting and manipulating spatial relationships. + +- Difficulty Level: Labeled as easy, medium, or hard, based on annotators' estimated cognitive load and time-to-solve metrics. + +This pipeline ensures that VISUALPUZZLES presents a diverse set of high-quality questions designed to challenge multimodal LLMs on their reasoning abilities without involving + +pretrained domain knowledge. + +# 2.3 Dataset Statistics + +VISUALPUZZLES comprises 1,168 multimodal reasoning puzzles. It is designed to provide a balanced distribution across different reasoning categories, difficulty levels, and option formats for comprehensive evaluation. The statistics of VISUALPUZZLES are shown in Table 1. + +Across the five reasoning types, we maintain a roughly even distribution, ensuring that no single reasoning style dominates the benchmark. + +
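To make the annotation attributes above concrete, the sketch below shows how a single annotated item could be represented in code. The field names (`question`, `options`, `reasoning_category`, `difficulty`, `option_type`) are illustrative assumptions for exposition only, not the dataset's released schema.

```python
# Illustrative (hypothetical) representation of one VISUALPUZZLES item.
# Field names are assumptions for exposition; the released dataset's
# actual schema may differ.
example_item = {
    "question": "Which option completes the visual pattern?",
    "image": "puzzle_0001.png",        # puzzle image shown to the model
    "options": ["A", "B", "C", "D"],   # four choices (text- or image-based)
    "option_type": "image",            # "image" or "text"
    "answer": "C",
    "reasoning_category": "inductive", # one of the five reasoning categories
    "difficulty": "medium",            # easy / medium / hard
}

# Sanity checks mirroring the annotation attributes described above.
assert example_item["reasoning_category"] in {
    "algorithmic", "analogical", "deductive", "inductive", "spatial"
}
assert example_item["difficulty"] in {"easy", "medium", "hard"}
```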
| Category | Statistics |
|---|---|
| Total Questions | 1168 |
| - Algorithmic Reasoning | 262 |
| - Analogical Reasoning | 211 |
| - Deductive Reasoning | 200 |
| - Inductive Reasoning | 209 |
| - Spatial Reasoning | 286 |
| Easy/Medium/Hard | 46% / 39% / 15% |
| Option Type (Image/Text) | 57% / 43% |
| AVG. Question Length | 154.9 |
| % Easy Words | 54% |
+ +Table 1: Statistics of VISUALPUZZLES + +Similarly, we balanced the dataset across the three difficulty levels (easy, medium, hard) to capture a wide spectrum of cognitive demands. Approximately half of the answer choices in the dataset are image-based and the other half are text-based, enabling evaluation of models' abilities to reason across diverse query formats. + +In terms of language complexity, VISUALPUZZLES was constructed with an emphasis on accessibility. Most of the question text uses Basic English vocabulary2 to minimize the impact of linguistic complexity on reasoning performance, focusing the evaluation strictly on multimodal reasoning. + +Compared to prior benchmarks, VISUALPUZZLES is unique in that it explicitly minimizes domain-specific knowledge requirements while maintaining high reasoning complexity. We demonstrate these traits of VISUALPUZZLES in Section 5. + +# 3 Experiments and Results + +# 3.1 Experimental Setup + +We comprehensively evaluated the reasoning abilities of a variety of MLLMs on VISUALPUZZLES. Additionally, we performed human evaluations to better understand the gap between human and models' reasoning capabilities. + +We selected a diverse set of proprietary and open MLLMs to ensure broad coverage in terms of model architecture, training scale, and intended application domains. This diversity allows us to capture a wide spectrum of current approaches and capabilities in the field. We integrated VISUALPUZZLES into Lmms-eval (Li* et al., 2024). + +Proprietary Models. We evaluate several leading proprietary models that represent the current state of the art: (1) GPT-4o, o1, o3, and o4-mini (OpenAI, 2024; Jaech et al., 2024); (2) Gemini-1.5-Pro, Gemini-2.0-Flash, Gemini-2.0-Flash-Thinking, and Gemini-2.5-Pro (Gemini et al., 2023); (3) Claude-3.5-Sonnet and Claude-3.7-Sonnet (Anthropic, 2022). Among these, o1, o3, o4-mini are explicitly optimized for reasoning, while Gemini-2.0-Flash-Thinking and Claude-3.7-Sonnet incorporate dedicated modules for extensive step-by-step problem-solving. + +Open Models. We further evaluate widely used open MLLMs to gauge how open models compare against proprietary models: (1) LLaVA Series (Liu et al., 2023a; 2024a; Li et al., 2024): LLaVA-1.5 (7B/13B), LLaVA-1.6 (7B/13B/34B), and LLaVA-OV (0.5B/7B/72B); (2) Llama-3.2-Vision-Instruct (11B/90B) (Dubey et al., 2024); (3) Qwen-VL Series (Bai et al., 2024; Yang et al., 2024; Qwen Team, 2025a; 2024): including Qwen-VL, Qwen2-VL (2B/7B/72B-Instruct), Qwen2.5-VL (3B/7B/72B-Instruct), and QvQ-72B-Preview; (4) Cambrian (8B/13B) (Tong et al., 2024); (5) Pangea-7B (Yue et al., 2025). + +We apply both direct multiple-choice prompting and Chain-of-Thought (CoT) prompting to each model, following recent findings that CoT can significantly enhance model reasoning on complex multimodal tasks. For each model we report the best performance, whether achieved by direct multiple-choice prompting or CoT prompting. + +Human Performance. To establish a strong baseline for comparison, we conducted human evaluations with 70 college-level volunteers. Human performance provides a valuable upper-bound reference for assessing the current capabilities and limitations of multimodal reasoning models. While this serves as a benchmark for present-day systems, it is possible that future models could surpass this level of performance. 
Each participant was randomly assigned a subset of the puzzles and completed them under the same resource-constrained conditions as the models (i.e., without access to external tools or the internet). On average, participants completed each puzzle in 78 seconds, reflecting the typical cognitive load and time demands imposed by VISUALPUZZLES. + +# 3.2 Overall Results + +Table 2 and Figure 1 compare the performance of humans and a selected set of models.3 All evaluated models, even the proprietary ones, perform below the 4th percentile of human accuracy, underscoring the significant gap in multimodal reasoning abilities. These results reinforce our finding that, although models have made progress in multimodal understanding, there remains a substantial margin for improvement before they can match or surpass human performance on multimodal reasoning. + +This pattern holds across categories as well. In Table 2, top human participants (95th percentile) exhibit near-perfect accuracy on multiple reasoning categories, while model performance remains substantially lower, even lower than the worst human performance (5th percentile). These results emphasize the need for continued innovation in model architectures and training paradigms if we aim to close the gap between model and human intelligence on complex multimodal reasoning. + +# 4 Disentangling Reasoning from Domain Knowledge + +# 4.1 Knowledge Intensity of VISUALPUZZLES + +Is VISUALPUZZLES less knowledge-intensive than existing reasoning benchmarks? This question is central to our goal of disentangling reasoning ability from domain-specific + +
| Model | Algorithmic | Analogical | Deductive | Inductive | Spatial | Overall |
|---|---|---|---|---|---|---|
| Random Choice | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 |
| Human (95th Percentile) | 100.0 | 100.0 | 100.0 | 81.6 | 100.0 | 89.3 |
| Human (50th Percentile) | 88.0 | 66.0 | 80.0 | 50.0 | 90.0 | 75.0 |
| Human (5th Percentile) | 68.1 | 25.0 | 37.0 | 0.0 | 59.1 | 57.5 |
| **Proprietary Models** | | | | | | |
| GPT-4o | 49.2 | 58.3 | 49.0 | 27.3 | 26.2 | 41.3 |
| o1 | 63.7 | 68.3 | 67.5 | 29.2 | 34.3 | 51.8 |
| o3 | 64.5 | 68.3 | 69.5 | 27.3 | 42.7 | 54.0 |
| o4-mini | 65.3 | 68.7 | 75.5 | 33.0 | 45.5 | 57.0 |
| Gemini-2.0-flash | 55.3 | 58.8 | 57.0 | 24.4 | 31.8 | 45.0 |
| Gemini-2.0-flash-thinking | 46.6 | 70.1 | 49.0 | 24.9 | 25.5 | 42.2 |
| Gemini-2.5-pro | 60.0 | 64.0 | 60.0 | 29.7 | 36.4 | 49.5 |
| Claude-3.7-Sonnet | 64.5 | 48.3 | 65.0 | 26.8 | 37.4 | 48.3 |
| Claude-3.7-Sonnet-Thinking | 67.2 | 44.1 | 61.5 | 31.1 | 37.1 | 48.2 |
| **Open Models (Qwen-Based)** | | | | | | |
| LLaVA-OV-7B | 27.5 | 28.0 | 40.5 | 24.4 | 28.0 | 29.4 |
| Pangea-7B | 32.4 | 23.7 | 38.5 | 28.7 | 32.5 | 31.3 |
| Qwen2.5-VL-7B-Instruct | 38.2 | 23.7 | 51.5 | 24.9 | 31.1 | 33.7 |
| LLaVA-OV-72B | 34.7 | 26.5 | 37.0 | 27.3 | 28.7 | 30.8 |
| QvQ-72B-Preview | 44.8 | 43.6 | 44.0 | 26.8 | 30.8 | 37.8 |
| Qwen2.5-VL-72B-Instruct | 53.4 | 46.9 | 58.0 | 25.8 | 29.5 | 42.3 |
| **Open Models (Llama-Based)** | | | | | | |
| Cambrian-8B | 31.3 | 24.2 | 36.0 | 24.0 | 29.0 | 28.9 |
| Llama-3.2-11B-Vision-Instruct | 31.0 | 30.8 | 39.0 | 21.1 | 26.2 | 29.4 |
| Llama-3.2-90B-Vision-Instruct | 45.0 | 23.2 | 43.0 | 26.3 | 31.5 | 34.1 |
Table 2: Performance (%) comparison of humans and selected models on VISUALPUZZLES. We report the best performance resulting from direct multiple-choice prompting and CoT prompting for each model. We highlight all the reasoning models.

knowledge. Many current benchmarks blur this line, making it difficult to assess general reasoning in non-expert settings. VISUALPUZZLES was designed to target visual reasoning skills while deliberately minimizing reliance on specialized knowledge.

To test whether VISUALPUZZLES achieves this goal, we prompted GPT-4o to generate "knowledge concept checklists" for 50 randomly selected questions from a widely-used knowledge-intensive reasoning dataset, MMMU, and 50 from VISUALPUZZLES. We manually verified each question as discussed in subsection E.3. Each checklist comprises knowledge-specific questions intended to assess whether a model possesses the background information required to solve the original problem. For example, if a question depends on understanding two distinct physics laws, its checklist would include a question asking the model to explain each. The number of checklist items per instance serves as a proxy for knowledge intensity.

We found that MMMU problems resulted in significantly more checklist items on average (3.9) compared to VISUALPUZZLES (1.1), as shown in Table 3. This supports the hypothesis that VISUALPUZZLES is substantially less reliant on domain knowledge. As a result, performance on VISUALPUZZLES more directly reflects a model's ability to reason over visual and textual content, offering
| Benchmark | # Knowledge Qs. |
|---|---|
| MMMU | 3.9 |
| VISUALPUZZLES | 1.1 |
+ +Table 3: AVG. number of knowledge concept questions generated per instance on MMMU vs. VISUALPUZZLES. + +a clearer signal of progress in multimodal reasoning. Full prompt examples and further discussion are provided in Appendix E. + +Do models already possess the knowledge required to solve VISUALPUZZLES? To explore this, we measured models' knowledge accuracy—their ability to answer the knowledge checklist questions correctly—on both benchmarks. This metric reflects how much of the required knowledge is already known by the model, independent of reasoning. We found a stark contrast: while many models exceed $90\%$ knowledge accuracy on VISUALPUZZLES, + +![](images/422bc896f6eb461b560d28a6bcdb7b46675ce08dc157a69ee3ea72239fe20f5f.jpg) + +![](images/dd2c3eda1142f57bd67e35d01e2305f06caa0abe215815db4166a95aaa2a731c.jpg) + +![](images/8ac1191f2d067a68d549fd32aad8c30f764923b7f3097b435600a39acd0bbc64.jpg) +Figure 3: Scatter plots with trend lines of the relationship between accuracy and model size (top) and the relationship between reasoning and knowledge accuracy (bottom) on MMMU and VISUALPUZZLES. The dots' sizes represent relative model sizes. The correlation between reasoning accuracy and knowledge accuracy is higher on MMMU (0.8) than on VISUALPUZZLES (0.4). + +![](images/085189f854c325765c80bfc7f114017ecfa432448aa0ef26f6e006d72565f3fc.jpg) + +most score below $60\%$ on MMMU, with smaller models frequently dropping under $50\%$ . Only the largest models approach $80\%$ accuracy on MMMU, underscoring its heavier reliance on domain-specific knowledge. + +Does scaling up model size improve performance? We also plot reasoning accuracy (i.e., overall performance on the benchmark) in Figure 3, revealing some interesting trends: + +- MMMU. Larger models tend to have higher knowledge accuracy, and this often translates into higher overall benchmark performance. This aligns with MMMU's reliance on domain-specific understanding; models with more parameters and training data are better at recalling relevant factual knowledge, thus improving their overall performance. +- VISUALPUZZLES. Although many models achieve near- $100\%$ knowledge accuracy on VISUALPUZZLES, we observe no clear increase in both knowledge and reasoning accuracy as model size grows. In contrast to MMMU, simply scaling number of parameters does not guarantee better performance on VISUALPUZZLES, implying that further gains on VISUALPUZZLES must stem from improvements in models' reasoning abilities rather than reliance on extensive knowledge. + +What is the relationship between knowledge and reasoning? Figure 3 shows two scatter plots with trend lines that measure how knowledge accuracy correlates with reasoning accuracy across different open models, where the relative sizes of the dots represent the sizes of the models. On MMMU (left), there is a strong positive correlation (0.8), suggesting that a model possessing more knowledge strongly correlates better reasoning performance. In contrast, VISUALPUZZLES (right) exhibits a more modest correlation (0.4). Although there is still an upward trend, gains in knowledge accuracy lead to smaller improvements in reasoning accuracy. This discrepancy implies that while overcoming knowledge gaps is central to reasoning success on MMMU, VISUALPUZZLES tasks demand more nuanced inference steps that depends less on domain knowledge. + +Overall, these findings reinforce that VISUALPUZZLES's comparatively lower knowledge requirements are readily met by both proprietary and open models. 
By contrast, MMMU poses a greater challenge to smaller models in terms of knowledge, and scaling up model size clearly benefits such knowledge-intensive tasks. However, on VISUALPUZZLES, larger model size alone is not a decisive factor, which might imply that genuine multimodal reasoning depends on more than just the number of parameters or pre-trained knowledge.

# 4.2 Reasoning Complexity of VISUALPUZZLES

**Do questions in VISUALPUZZLES require more complex reasoning than those in existing benchmarks like MMMU?**

Besides observing that models generally achieve lower accuracy on VISUALPUZZLES compared to MMMU, we further investigated whether this gap stems from increased reasoning complexity. To do so, we measured the proportion of reasoning steps required to solve each question. We began by gathering detailed, step-by-step solutions from the models for each question, which were manually verified for completeness. We then classified, with the help of an LLM, whether each step is a logical reasoning step. We show the result in Table 4. On average, logical reasoning steps make up 14.8% more of the total solution steps for VISUALPUZZLES questions than for MMMU questions (82.1% vs. 71.5%). This analysis is based on GPT-4o and Gemini-2.0-Flash across 200 randomly sampled questions per benchmark. These results suggest that VISUALPUZZLES demands more extensive reasoning, aligning with its goal of evaluating deeper multimodal reasoning beyond factual recall. A prompt example is shown in Appendix F.
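As a rough illustration of the bookkeeping behind Table 4 (not the authors' actual pipeline; the step classification itself is delegated to an LLM judge, whose prompt is in Appendix F), the share of logical reasoning steps could be computed as follows once each solution step has been labeled:

```python
def reasoning_step_share(labeled_solutions):
    """Fraction of solution steps labeled as logical reasoning steps.

    `labeled_solutions` is a list of solutions; each solution is a list of
    (step_text, is_reasoning) pairs, where `is_reasoning` is the boolean
    label produced by an LLM judge. Illustrative bookkeeping only, not the
    paper's exact implementation.
    """
    total_steps = sum(len(steps) for steps in labeled_solutions)
    reasoning_steps = sum(
        1 for steps in labeled_solutions for _, is_reasoning in steps if is_reasoning
    )
    return reasoning_steps / total_steps if total_steps else 0.0


# Toy example: 3 of 4 steps across two solutions are reasoning steps.
solutions = [
    [("Read the grid from the image.", False), ("Infer the row-wise rule.", True)],
    [("Apply the rule to the last row.", True), ("Check against option C.", True)],
]
print(f"{reasoning_step_share(solutions):.1%}")  # -> 75.0%
```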
| Model | MMMU | VISUALPUZZLES |
|---|---|---|
| GPT-4o | 75.1% | 87.0% |
| Gemini-2.0-Flash | 67.9% | 77.3% |
+ +Table 4: Percentage of logical reasoning steps in solving benchmark questions. + +# 4.3 Do Reasoning Models Perform Better than Their Baselines? + +![](images/097011d809433a79022988b41408051bc1ba5980c55cdc75b274a14638a0f3d4.jpg) +Figure 4: Comparison of accuracy and average number of total completion tokens of reasoning models and their general counterparts on VISUALPUZZLES. We didn't include Gemini-2.0-Flash models here because Gemini-2.0-Flash-Thinking does not reveal the number of reasoning tokens of responses. The accuracies of Gemini-2.0-Flash and Gemini-2.0-Flash-Thinking is $45.0\%$ and $42.2\%$ respectively. Despite much higher number of completion tokens, reasoning models do not often achieve better performance on VISUALPUZZLES. + +![](images/9d2390fe3c1b50f4c9e0c3fc610d172eb6b346f542c089a5ccbaa752243e3aff.jpg) + +![](images/b3f117d6f51d23f162333ef36f7641cdcf69a8d721c3509ea76381b786e355f5.jpg) + +Recent reasoning models often scale up inference compute by generating longer chains of thought (CoTs) to enhance reasoning ability. To assess the effectiveness of this strategy on VISUALPUZZLES, we compare several reasoning models with their non-reasoning counterparts in Figure 4. The reasoning model o1 outperforms GPT-4o overall. However, structured "thinking" modes, despite much higher number of completion tokens, show no consistent benefit. Similarity of output further reveals that the thinking mode primarily increases vocabulary without meaningfully altering the underlying reasoning process, as illustrated in Figure 13. + +# 4.4 Are Branching and Revalidation Reasoning Patterns Effective on VISUALPUZZLES? + +As discussed in Section 4.3, reasoning-enabled models do not consistently outperform their non-reasoning counterparts on VISUALPUZZLES. To better understand this discrepancy, we + +![](images/b536792942d0b01abaf60cfa156d36a936bb060f4df1b04a5055134b1d998792.jpg) +Figure 5: Comparison of Reasoning Pattern of Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Left figure compares the accuracy of Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Middle figure shows frequency of each pattern. Right figure shows correlation of the patterns with accuracy on the benchmarks. + +![](images/523a7fd5906283d19e4b6b98b68b5d07b68a0003d35bf321828d973d89158391.jpg) + +![](images/1662aa990cd75de3f0e4e15080707ff2d3e5d543c8a719718bfdc6202b92da36.jpg) + +examine Claude-3.7-Sonnet-Thinking's reasoning behaviors present in long CoTs, specifically, branching and re-validation, which are known to play important roles in enhancing reasoning performance4. + +As shown in Figure 5, our analysis reveals a striking contrast between benchmarks. On MMMU, both branching and re-validation correlate positively with model accuracy. These strategies help models explore alternative reasoning paths and revisit earlier steps, aiding in the retrieval of relevant factual knowledge, an essential component for solving MMMU's knowledge-intensive questions. An illustrative example is provided in Appendix E. + +Surprisingly, on VISUALPUZZLES, these reasoning behaviors are more frequent, yet less predictive of success. Despite their increased presence in long-form responses, we observe no significant correlation between these strategies and task accuracy. This suggests that models may be using branching and re-validation in ways that do not meaningfully contribute to solving the problem. 
Figure 6 highlights this with an example from Claude-3.7-Sonnet-Thinking, where the model applies branching to a VISUALPUZZLES puzzle. However, the additional reasoning paths remain shallow and fail to engage with the core challenge: understanding the spatial arrangement of the chairs in the image. The full response is included in Appendix E.

Question: Given a pair of chairs arranged at a specific angle, select the option that matches the angle of the chairs in the question. (Answer: C)

![](images/d90d53e47cf6e5230fb7578325d85a11aeb477745ec81d8999ae9db553e82ac7.jpg)
Figure 6: An example of Claude-3.7-Sonnet-Thinking utilizing branching to solve a VISUALPUZZLES puzzle.

Claude-3.7-Sonnet (With Thinking):
Let me examine the chairs in the question:
- The chairs are positioned 180 degrees apart.
- They are shown in profile view.
Option A: ...; Option B: ...; Option C: ...; Option D: ... Any of them is correct from an angular perspective...
However, looking more carefully at the exact orientation:
- The original chairs are shown in perfect profile view
- Option A shows chairs in perfect profile view, ...
I'll go with Option A as it presents the cleanest profile view that matches the arrangement in the question.

# 5 Analysis

# 5.1 Do Models Approach VISUALPUZZLES Questions Differently?

Table 5 shows statistics of Claude-3.7-Sonnet-Thinking's answering strategy. We observe a clear divergence in answering strategies between MMMU and VISUALPUZZLES.
| Benchmark | Answer-First | Option-First |
| --- | --- | --- |
| MMMU | 29.3% | 70.7% |
| VISUALPUZZLES (Image Options) | 72.5% | 27.5% |
| VISUALPUZZLES (Text Options) | 98.3% | 1.7% |
Table 5: Answering strategy of Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES.

On MMMU, the model tends to follow an option-driven approach, using the provided choices early to eliminate unlikely answers and select the most relevant one, often without explicitly solving the problem. In contrast, models more frequently adopt an answer-first strategy on VISUALPUZZLES, attempting to solve the question independently before comparing the result to the answer choices. This pattern holds across both textual and image-based options, though the option-first approach appears slightly more often (around $30\%$ of the time) for image-based options, likely due to the added complexity of visual comparison.

# 5.2 Does model performance transfer between reasoning categories?

![](images/97e95fdf454706c7da694622cce81f46164cd08513842a89a130211e7210be6f.jpg)
Figure 7: Correlation heatmap among reasoning categories for models (averaged across all models we evaluated).

Figure 7 presents a correlation heatmap illustrating the relationships among the five reasoning categories in VISUALPUZZLES. We report correlations averaged across all models evaluated in Table 2. For humans, each reasoning category likely engages different cognitive or mental processes (Goel & Dolan, 2004; Green et al., 2010; Bright & Feeney, 2014; Babcock & Vallesi, 2015), so performance in one category might not transfer to performance in another. However, the correlation heatmap of the models tells a different story. We observe notably strong correlations across reasoning categories, with values ranging from 0.11 to as high as 0.94. In particular, algorithmic and deductive reasoning show a high correlation (0.94), and other pairs such as algorithmic-analogical and deductive-analogical also exhibit strong associations. This suggests that model performance tends to generalize across categories. However, this generalization may not reflect true reasoning abilities. Instead, the high correlations could indicate that models are leveraging shared surface-level patterns or shortcut strategies that happen to work across multiple structurally different categories, unlike humans, who may rely on distinct cognitive processes.

# 5.3 Error Analysis

Figure 8 shows a pie chart illustrating the distribution of error categories over 100 instances generated by Claude-3.7-Sonnet-Thinking on VISUALPUZZLES. Reasoning errors dominate at $56\%$, reinforcing that reasoning is the greatest challenge for models on VISUALPUZZLES. Perceptual errors ($21\%$) and spatial / orientation errors ($17\%$) also constitute substantial portions of failures, reflecting difficulties in interpreting visual elements and understanding spatial relationships. These three categories together account for $94\%$ of mistakes, emphasizing the need for multimodal models with stronger reasoning capabilities as well as more robust perception and spatial understanding. Textual and visual understanding errors ($4\%$) and reject-to-answer cases ($2\%$) are relatively rare. Appendix I shows samples of error and correct cases for each reasoning and difficulty category.

![](images/9be1b71d51b868e947ea438ebbee99b5295830e1be6a90035757235c7e3f403e.jpg)
Figure 8: Error Distribution of Claude-3.7-Sonnet-Thinking

# 6 Related Work

Multimodal Large Language Models (MLLMs), particularly vision-language models, have experienced significant improvements recently.
Large-scale vision-language models (Gemini et al., 2023; OpenAI, 2024; Anthropic, 2022), including open-weight ones (Li et al., 2024; Yue et al., 2025; Liu et al., 2024b; Tong et al., 2024; Dubey et al., 2024), are capable of using both image and text inputs to solve challenging questions.

Multimodal reasoning models, i.e., models that specialize in complex reasoning, further push the boundary of MLLMs' capabilities. Large-scale multimodal reasoning models such as QVQ (Qwen Team, 2024), Claude-3.7-Sonnet-Thinking (Anthropic, 2022), o1 (Jaech et al., 2024), and Gemini-2.0-Flash-Thinking (Gemini et al., 2023) excel at reasoning-heavy tasks such as coding and solving math problems.

Multimodal Reasoning Benchmarks. A number of multimodal benchmarks test both models' world knowledge and reasoning abilities. These benchmarks (Yue et al., 2024a; Marino et al., 2019; Liu et al., 2023b; Yue et al., 2024b; Authors, 2025) emphasize the multimodal ability of models as a whole, without further separating knowledge from reasoning.

Recently, more multimodal benchmarks have placed emphasis on multimodal logical reasoning abilities. Many of them (Lu et al., 2023; Wang et al., 2024b) focus primarily on mathematical problems, testing both mathematical knowledge and reasoning. Others cover more general logical reasoning problems (Cherian et al., 2022b; Gao et al., 2023), testing both models' knowledge and reasoning across different domains.

# 7 Conclusion and Future Work

We presented VISUALPUZZLES, a novel multimodal benchmark carefully designed to minimize the impact of domain-specific knowledge and isolate models' core reasoning capabilities. Our results show that while proprietary and large-scale open models achieve relatively higher performance, they still fall short of human-level reasoning, especially on more complex tasks such as analogical and inductive reasoning. Moreover, we observe that strong performance on knowledge-intensive benchmarks like MathVista and MMMU does not necessarily translate into high accuracy on VISUALPUZZLES, underscoring the distinct challenge of knowledge-light reasoning tasks.

These findings suggest that purely scaling model size and knowledge resources may not suffice for robust multimodal reasoning skills; rather, methods that promote structured reasoning, such as explicit thinking modes or recursive reasoning steps, can offer substantial improvements, particularly for hard questions. Future research can explore new training strategies, specialized architectures, or model interpretations tailored to reduce reliance on memorized facts and enhance logical inference. Extending VISUALPUZZLES to include additional types of multi-image reasoning or temporally dynamic visual information may further stress-test models' core inference abilities. By disentangling domain knowledge from multimodal reasoning, we hope VISUALPUZZLES will serve as a valuable tool for developing and evaluating next-generation MLLMs that excel at genuinely understanding and reasoning about the world without depending heavily on specialized factual knowledge.

# 8 Limitations

Disentangling Knowledge. Despite our best efforts to isolate domain-specific knowledge from the evaluation of multimodal reasoning, VISUALPUZZLES is still not entirely free of knowledge dependencies. Basic familiarity with everyday objects or common scenarios is still required; completely knowledge-free evaluation remains an ideal rather than a practical reality.
Real-World Application. VISUALPUZZLES emphasizes puzzle-like questions that may not reflect the full diversity of real-world scenarios, limiting generalizability to more specialized domains.

Question Format. VISUALPUZZLES focuses on multiple-choice questions, which may not capture the breadth of open-ended reasoning tasks where models must generate complex textual or visual outputs.

Future work can address these limitations by including more varied question formats, broader domains, and more granular analyses of a model's knowledge versus its multimodal reasoning abilities.

# 9 Ethical Statement

This paper uses samples extracted from existing quiz sources for scholarly analysis and testing purposes, in accordance with US fair use law and standard practice. These data are neither intended for, nor capable of, substituting for the original works; thus, we believe their inclusion does not diminish the market value or utility of the source materials. A complete list of references for the data sources is attached in Appendix A.

# Acknowledgements

This project was supported in part by a grant from DSTA Singapore and the Carnegie Bosch Institute. The authors would like to thank CMU NeuLab colleagues for their constructive comments. The authors would also like to thank all volunteers who participated in the human evaluation.

# References

Anthropic. Claude, 2022. URL https://www.anthropic.com/index/introducing-claude.
Humanity's Last Exam's Authors. Humanity's last exam. ArXiv, abs/2501.14249, 2025. URL https://api.semanticscholar.org/CorpusID:275906652.
Laura Babcock and Antonino Vallesi. The interaction of process and domain in prefrontal cortex during inductive reasoning. Neuropsychologia, 67:91-99, 2015.
Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond, 2024. URL https://openreview.net/forum?id=qrGjFJV13m.
Yonatan Bitton, Ron Yosef, Eliyahu Strugo, Dafna Shahaf, Roy Schwartz, and Gabriel Stanovsky. VASR: Visual analogies of situation recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 241-249, 2023.
Aimée K Bright and Aidan Feeney. Causal knowledge and the development of inductive reasoning. Journal of Experimental Child Psychology, 122:48-61, 2014.
Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin Smith, and Joshua B Tenenbaum. Are deep neural networks smarter than second graders? arXiv preprint arXiv:2212.09993, 2022a.
Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin A. Smith, and Joshua B. Tenenbaum. Are deep neural networks smarter than second graders? 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10834-10844, 2022b. URL https://api.semanticscholar.org/CorpusID:254877678.
DeepSeek-AI. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.
Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The Llama 3 herd of models. ArXiv preprint, abs/2407.21783, 2024. URL https://arxiv.org/abs/2407.21783.
Jingying Gao, Qi Wu, Alan Blair, and Maurice Pagnucco. LoRA: A logical reasoning augmented dataset for visual question answering.
In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023.
Gemini, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: A family of highly capable multimodal models. ArXiv preprint, abs/2312.11805, 2023. URL https://arxiv.org/abs/2312.11805.
Vinod Goel and Raymond J Dolan. Differential involvement of left prefrontal cortex in inductive and deductive reasoning. Cognition, 93(3):B109-B121, 2004.
Adam E Green, David JM Kraemer, Jonathan A Fugelsang, Jeremy R Gray, and Kevin N Dunbar. Connecting long distance: semantic distance in analogical reasoning modulates frontopolar cortex activity. Cerebral Cortex, 20(1):70-76, 2010.
Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. OpenAI o1 system card. arXiv preprint arXiv:2412.16720, 2024.
Bo Li*, Peiyuan Zhang*, Kaicheng Zhang*, Fanyi Pu*, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. LMMs-Eval: Accelerating the development of large multimodal models, March 2024. URL https://github.com/EvolvingLMMs-Lab/lmms-eval.
Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024.
Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning, 2023a. URL https://arxiv.org/abs/2310.03744.
Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning, OCR, and world knowledge, 2024a. URL https://arxiv.org/pdf/2401.13601.
Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. LogiQA: A challenge dataset for machine reading comprehension with logical reasoning, 2020.
Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage UIs for text-rich visual understanding. ArXiv, abs/2410.13824, 2024b. URL https://api.semanticscholar.org/CorpusID:273403951.
Yuanzhan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, Kai Chen, and Dahua Lin. MMBench: Is your multi-modal model an all-around player? In European Conference on Computer Vision, 2023b. URL https://api.semanticscholar.org/CorpusID:259837088.
Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. MathVista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023.
Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. OK-VQA: A visual question answering benchmark requiring external knowledge. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3190-3199, 2019. URL https://api.semanticscholar.org/CorpusID:173991173.
OpenAI. Hello GPT-4o, 2024. URL https://openai.com/index/hello-gpt-4o/.
Qwen Team. QVQ: To see the world with wisdom, December 2024. URL https://qwenlm.github.io/blog/qvq-72b-preview/.
Qwen Team. Qwen2.5-VL, January 2025a. URL https://qwenlm.github.io/blog/qwen2.5-vl/.
Qwen Team. QwQ-32B: Embracing the power of reinforcement learning, March 2025b.
URL https://qwenlm.github.io/blog/qwq-32b/.
Jonathan Roberts, Kai Han, Neil Houlsby, and Samuel Albanie. SciFIBench: Benchmarking large multimodal models for scientific figure interpretation. In The Thirty-eighth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=HcLFNuQwy5.
Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. ArXiv preprint, abs/2406.16860, 2024. URL https://arxiv.org/abs/2406.16860.
Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with MATH-Vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2024a.
Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with MATH-Vision dataset, 2024b. URL https://arxiv.org/abs/2402.14804.
An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. ArXiv preprint, abs/2407.10671, 2024. URL https://arxiv.org/abs/2407.10671.
Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Renliang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert AGI. In Proceedings of CVPR, 2024a.
Xiang Yue, Tianyu Zheng, Yuansheng Ni, Yubo Wang, Kai Zhang, Shengbang Tong, Yuxuan Sun, Botao Yu, Ge Zhang, Huan Sun, et al. MMMU-Pro: A more robust multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2409.02813, 2024b.
Xiang Yue, Yueqi Song, Akari Asai, Simran Khanuja, Anjali Kantharuban, Seungone Kim, Jean de Dieu Nyandwi, Lintang Sutawika, Sathyanarayanan Ramamoorthy, and Graham Neubig. Pangea: A fully open multilingual multimodal LLM for 39 languages. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=a3g214yEys.

# Table of Contents in Appendix

# A VISUALPUZZLES Statistics

A.1 Breakdown of Statistics of VISUALPUZZLES
A.2 Data Sources

# B Model Evaluation Setup

# C Human Annotation Setup

C.1 Difficulty Labeling
C.2 Reasoning Category Labeling

# D Full Results

D.1 Full Results w/ CoT
D.2 Full Results w/o CoT

# E Knowledge Checklist

E.1 Knowledge Checklist Generation
E.2 Example Knowledge Checklist Question
E.3 Knowledge Checklist Human Annotation

# F Reasoning Complexity

# G Comparison with Other Benchmarks

# H Additional Analysis

H.1 Proprietary vs. Open Models
H.2 Reasoning Category and Difficulty Levels
H.3 Option Types and Difficulty Levels
H.4 Case Study of Reasoning
H.5 Impact of CoT

# I Case Study

# A VISUALPUZZLES Statistics

# A.1 Breakdown of Statistics of VISUALPUZZLES

Table 6 shows a breakdown of the statistics of VISUALPUZZLES questions.
| Reasoning Category | Image Options (Easy) | Image Options (Medium) | Image Options (Hard) | Text Options (Easy) | Text Options (Medium) | Text Options (Hard) | Total |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Algorithmic | 21 | 8 | 0 | 124 | 100 | 9 | 262 |
| Analogical | 120 | 81 | 10 | 0 | 0 | 0 | 211 |
| Deductive | 29 | 24 | 2 | 45 | 79 | 21 | 200 |
| Inductive | 7 | 70 | 127 | 3 | 2 | 0 | 209 |
| Spatial | 123 | 41 | 6 | 61 | 52 | 3 | 286 |
| Total | 300 | 224 | 145 | 233 | 233 | 33 | 1168 |
Table 6: Number of questions in each reasoning category, option type, and difficulty level.

# A.2 Data Sources

- Chinese Civil Service Examination (中国国家公务员考试) (224 puzzles): we manually translated questions from this exam from Chinese to English.
- Textbooks (210 puzzles): we carefully collected and re-purposed questions from online resources and textbooks.
- Smart-101 (Cherian et al., 2022a) (247 puzzles): we carefully selected images from this benchmark and synthesized new questions.
- MATH-Vision (Wang et al., 2024a) (293 puzzles): we carefully selected and re-purposed questions from this benchmark.
- VASR (Bitton et al., 2023) (194 puzzles): we carefully selected questions from this benchmark.

# B Model Evaluation Setup
# Model Evaluation Prompt with Chain-of-Thought

Solve the multiple-choice question and then answer with the option letter from the given choices. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of options. Think step by step before answering.

# Model Evaluation Prompt without Chain-of-Thought

Answer the question with the option's letter from the given choices directly.
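As a usage illustration, the snippet below shows how responses produced with the CoT prompt above could be scored; the answer-extraction regex and the accuracy helper are simple assumptions about the evaluation harness, not a description of the exact code used.

```python
# Sketch: score model responses produced with the CoT evaluation prompt above.
# The answer-extraction regex and scoring helper are illustrative assumptions.
import re

ANSWER_RE = re.compile(r"Answer:\s*\(?([A-D])\)?", re.IGNORECASE)

def extract_choice(response: str) -> str | None:
    """Return the last 'Answer: X' letter found in a model response, if any."""
    matches = ANSWER_RE.findall(response)
    return matches[-1].upper() if matches else None

def accuracy(responses: list[str], gold_letters: list[str]) -> float:
    """Fraction of responses whose extracted letter matches the gold option."""
    correct = sum(
        extract_choice(resp) == gold.upper()
        for resp, gold in zip(responses, gold_letters)
    )
    return correct / len(gold_letters)

# Example: accuracy(["...step by step...\nAnswer: C"], ["C"]) -> 1.0
```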
# C Human Annotation Setup

# C.1 Difficulty Labeling

Each question was also carefully assigned a difficulty label of easy, medium, or hard, based on the cognitive load required for reasoning.

- Easy Level questions could be solved by the annotator in less than one minute.
- Medium Level questions could be solved by the annotator in one to three minutes.
- Hard Level questions required more than five minutes for the annotator to solve, or the annotator gave up on solving them.

# Annotation Guideline for Puzzle Difficulty

Try to solve the puzzle first. You need to measure the time you spent attempting to solve each puzzle. Then, select from Easy, Medium, or Hard based on the time required.

- Easy Level: You can solve or answer the question within 1 minute. This level of puzzles should require minimal reasoning.
- Medium Level: You can solve or answer the question within 1-3 minutes. This level of puzzles should demand moderate reasoning.
- Hard Level: You need more than 5 minutes to solve the question, or you cannot solve it at all. This level of puzzles should involve significant, multi-step reasoning.

# C.2 Reasoning Category Labeling

# Annotation Guideline for Puzzle Reasoning Category

Assign the category that best describes the primary type of reasoning or logic required for each puzzle:
- Algorithmic Reasoning: Involves following or devising a step-by-step procedure or rule-based process.
- Analogical Reasoning: Requires identifying relationships by comparison between pairs of entities.
- Deductive Reasoning: Involves deriving specific conclusions from general or given premises.
- Inductive Reasoning: Focuses on generalizing a rule or pattern from specific instances.
- Spatial Reasoning: Involves visualizing and manipulating shapes, distances, or orientations.

# D Full Results

# D.1 Full Results w/ CoT

Table 7 reports the full results with Chain-of-Thought prompting.

# D.2 Full Results w/o CoT

Table 8 reports the full results with direct multiple-choice prompting.

# E Knowledge Checklist

# E.1 Knowledge Checklist Generation

# Prompt to Generate Knowledge Checklist Questions

You are an exam writer. You are now writing a knowledge test. You are given a question (Question) regarding an image and its standard solution (Solution); your task is to write free-response questions that test the individual knowledge required to answer the question correctly.

You should follow these steps to complete the task:

1. Explicitly analyze the given image, Question, and Solution.
2. Explicitly list the individual knowledge concepts required to reach the Solution.
3. Write free-response questions that test the definition of each concept listed. Your generated questions should not include details of the given Question. Note that you need to provide answer keys to these questions too.
4. Format the free-response questions in JSON format.

Question: question

Solution: answer
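To make the generation step concrete, here is a minimal sketch of how the prompt above could be sent to a model and its JSON output parsed into checklist question-answer pairs; the model choice, client call, and bracket-based JSON extraction are illustrative assumptions rather than the authors' exact implementation.

```python
# Sketch: generate and parse a knowledge checklist for one benchmark question.
# Assumptions: the openai SDK with GPT-4o is used, and the model returns a JSON
# list of {"question": ..., "answer": ...} objects somewhere in its reply.
import json
from openai import OpenAI

client = OpenAI()  # requires OPENAI_API_KEY

def generate_checklist(instructions: str, question: str, solution: str) -> list[dict]:
    """`instructions` is the checklist-generation prompt shown above,
    without the trailing Question/Solution lines."""
    prompt = f"{instructions}\n\nQuestion: {question}\n\nSolution: {solution}"
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
    )
    text = resp.choices[0].message.content
    start, end = text.find("["), text.rfind("]")
    if start == -1 or end == -1:
        return []  # no JSON list found; in practice this case would be retried
    return json.loads(text[start : end + 1])
```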
| Model | Algorithmic | Analogical | Deductive | Inductive | Spatial | Overall |
| --- | --- | --- | --- | --- | --- | --- |
| Random Choice | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 |
| Human (95th Percentile) | 100.0 | 100.0 | 100.0 | 81.6 | 100.0 | 89.3 |
| Human (50th Percentile) | 88.0 | 66.0 | 80.0 | 50.0 | 90.0 | 75.0 |
| Human (5th Percentile) | 68.1 | 25.0 | 37.0 | 0.0 | 59.1 | 57.5 |
| *Proprietary Models* | | | | | | |
| o4-mini | 65.3 | 68.7 | 75.5 | 33.0 | 45.5 | 57.0 |
| o3 | 64.5 | 68.3 | 69.5 | 27.3 | 42.7 | 54.0 |
| o1 | 63.7 | 68.3 | 67.5 | 29.2 | 34.3 | 51.8 |
| GPT-4o | 49.2 | 58.3 | 49.0 | 27.3 | 26.2 | 41.3 |
| Gemini-2.5-pro | 60.0 | 64.0 | 60.0 | 29.7 | 36.4 | 49.5 |
| Gemini-2.0-flash | 55.3 | 58.8 | 57.0 | 24.4 | 31.8 | 45.0 |
| Gemini-2.0-flash-thinking | 46.6 | 70.1 | 49.0 | 24.9 | 25.5 | 42.2 |
| Gemini-1.5-Pro | 53.4 | 57.4 | 58.5 | 26.3 | 32.5 | 45.0 |
| Claude-3.7-Sonnet | 64.5 | 48.3 | 65.0 | 26.8 | 37.4 | 48.3 |
| Claude-3.7-Sonnet-thinking | 67.2 | 44.1 | 61.5 | 31.1 | 37.1 | 48.2 |
| Claude-3.5-Sonnet | 53.4 | 47.9 | 51.5 | 25.4 | 34.3 | 42.4 |
| *Open Models* | | | | | | |
| LLaVA-1.5-7B | 23.3 | 21.8 | 36.0 | 20.6 | 19.2 | 23.7 |
| LLaVA-1.5-13B | 24.8 | 21.8 | 23.0 | 25.4 | 25.5 | 24.2 |
| LLaVA-1.6-7B | 27.5 | 23.7 | 30.0 | 22.5 | 21.3 | 24.8 |
| LLaVA-1.6-13B | 25.2 | 25.6 | 27.0 | 27.3 | 23.4 | 25.5 |
| LLaVA-1.6-34B | 29.4 | 28.0 | 43.0 | 24.9 | 25.5 | 29.7 |
| LLaVA-OV-0.5B | 21.0 | 26.1 | 30.5 | 22.5 | 25.2 | 24.8 |
| LLaVA-OV-7B | 27.9 | 26.1 | 36.5 | 23.4 | 25.5 | 27.7 |
| LLaVA-OV-72B | 34.7 | 26.5 | 37.0 | 27.3 | 28.7 | 30.8 |
| Llama-3.2-11B-Vision-Instruct | 31.0 | 30.8 | 39.0 | 21.1 | 26.2 | 29.4 |
| Llama-3.2-90B-Vision-Instruct | 45.0 | 23.2 | 43.0 | 26.3 | 31.5 | 34.1 |
| Qwen-VL | 21.4 | 31.3 | 25.0 | 26.3 | 24.1 | 25.3 |
| Qwen2-VL-72B | 41.6 | 28.4 | 39.5 | 22.5 | 29.0 | 32.4 |
| QvQ-72B-Preview | 43.1 | 45.5 | 48.0 | 27.3 | 27.6 | 37.8 |
| Qwen2-VL-2B-Instruct | 26.0 | 26.1 | 24.5 | 27.8 | 25.5 | 26.0 |
| Qwen2-VL-7B-Instruct | 36.3 | 21.8 | 38.5 | 20.6 | 22.7 | 27.9 |
| Qwen2-VL-72B-Instruct | 39.9 | 33.5 | 45.2 | 23.5 | 32.4 | 34.9 |
| Qwen2.5-VL-3B-Instruct | 35.1 | 27.5 | 44.5 | 25.8 | 24.8 | 31.2 |
| Qwen2.5-VL-7B-Instruct | 40.5 | 26.6 | 39.0 | 24.0 | 29.7 | 32.1 |
| Qwen2.5-VL-72B-Instruct | 53.4 | 46.9 | 58.0 | 25.8 | 29.5 | 42.3 |
| Cambrian-8B | 31.3 | 24.2 | 36.0 | 24.0 | 29.0 | 28.9 |
| Cambrian-13B | 24.8 | 25.6 | 39.5 | 24.4 | 21.0 | 26.5 |
| Pangea-7B | 30.5 | 28.9 | 35.0 | 24.4 | 25.2 | 28.6 |
Table 7: Performance (%) of various models with Chain-of-Thought (CoT) prompting on VISUALPUZZLES.
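As a consistency note, the Overall column in Tables 7 and 8 appears to be a question-count-weighted average of the five per-category accuracies, using the category counts from Table 6; the short sketch below reproduces, for example, o1's reported overall score of 51.8 from its per-category numbers.

```python
# Sketch: recompute the Overall column as a question-count-weighted average of the
# per-category accuracies, using the category counts from Table 6.
CATEGORY_COUNTS = {
    "Algorithmic": 262,
    "Analogical": 211,
    "Deductive": 200,
    "Inductive": 209,
    "Spatial": 286,
}  # 1168 questions in total

def overall_accuracy(per_category: dict[str, float]) -> float:
    total = sum(CATEGORY_COUNTS.values())
    return sum(per_category[c] * n for c, n in CATEGORY_COUNTS.items()) / total

# o1 with CoT (Table 7): prints 51.8
print(round(overall_accuracy({
    "Algorithmic": 63.7, "Analogical": 68.3, "Deductive": 67.5,
    "Inductive": 29.2, "Spatial": 34.3,
}), 1))
```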
| Model | Algorithmic | Analogical | Deductive | Inductive | Spatial | Overall |
| --- | --- | --- | --- | --- | --- | --- |
| Random Choice | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 | 25.0 |
| Human (95th Percentile) | 100.0 | 100.0 | 100.0 | 81.6 | 100.0 | 89.3 |
| Human (50th Percentile) | 88.0 | 66.0 | 80.0 | 50.0 | 90.0 | 75.0 |
| Human (5th Percentile) | 68.1 | 25.0 | 37.0 | 0.0 | 59.1 | 57.5 |
| *Proprietary Models* | | | | | | |
| GPT-4o | 40.8 | 34.1 | 40.5 | 24.9 | 29.7 | 34.0 |
| Gemini-2.0-flash | 57.6 | 41.7 | 58.0 | 23.0 | 35.7 | 43.2 |
| Gemini-1.5-Pro | 51.2 | 46.5 | 54.0 | 24.9 | 29.4 | 40.8 |
| *Open Models* | | | | | | |
| LLaVA-1.5-7B | 24.4 | 24.7 | 34.5 | 26.8 | 25.5 | 26.9 |
| LLaVA-1.5-13B | 24.4 | 26.1 | 33.5 | 26.3 | 28.3 | 27.6 |
| LLaVA-1.6-7B | 27.5 | 25.1 | 32.5 | 24.9 | 27.3 | 27.4 |
| LLaVA-1.6-13B | 21.4 | 24.7 | 29.5 | 28.2 | 23.1 | 25.0 |
| LLaVA-1.6-34B | 31.3 | 27.3 | 43.0 | 24.4 | 27.6 | 29.8 |
| LLaVA-OV-0.5B | 24.4 | 25.6 | 37.5 | 24.9 | 25.5 | 27.2 |
| LLaVA-OV-7B | 27.5 | 28.0 | 40.5 | 24.4 | 28.0 | 29.4 |
| LLaVA-OV-72B | 31.7 | 23.6 | 45.0 | 21.3 | 24.6 | 28.8 |
| Llama-3.2-11B-Vision-Instruct | 27.5 | 24.2 | 31.0 | 26.3 | 27.6 | 27.3 |
| Llama-3.2-90B-Vision-Instruct | 38.2 | 22.3 | 44.5 | 25.8 | 33.6 | 33.1 |
| Qwen-VL | 23.7 | 26.5 | 29.5 | 27.8 | 26.6 | 26.6 |
| Qwen2-VL-72B | 38.9 | 28.4 | 43.0 | 20.6 | 29.0 | 32.0 |
| QvQ-72B-Preview | 44.8 | 43.6 | 44.0 | 26.8 | 30.8 | 37.8 |
| Qwen2-VL-2B-Instruct | 31.7 | 29.4 | 40.5 | 23.9 | 31.5 | 31.3 |
| Qwen2-VL-7B-Instruct | 33.6 | 24.2 | 46.0 | 22.5 | 26.2 | 30.2 |
| Qwen2-VL-72B-Instruct | 40.5 | 30.3 | 46.0 | 25.4 | 29.4 | 34.2 |
| Qwen2.5-VL-3B-Instruct | 36.3 | 26.1 | 47.0 | 25.8 | 22.4 | 31.0 |
| Qwen2.5-VL-7B-Instruct | 38.2 | 23.7 | 51.5 | 24.9 | 31.1 | 33.7 |
| Qwen2.5-VL-72B-Instruct | 43.1 | 40.3 | 51.5 | 25.4 | 33.7 | 38.6 |
| Cambrian-8B | 25.2 | 20.4 | 35.0 | 23.0 | 20.6 | 24.5 |
| Cambrian-13B | 23.3 | 28.0 | 36.5 | 24.9 | 26.2 | 27.4 |
| Pangea-7B | 32.4 | 23.7 | 38.5 | 28.7 | 32.5 | 31.3 |
Table 8: Performance (%) of various models with direct multiple-choice prompting on VISUALPUZZLES.

# E.2 Example Knowledge Checklist Question

# Example Knowledge Checklist Question (MMMU)

- Question: Explain the Arbitrage Pricing Theory (APT) model and its purpose in finance.
- Answer: The Arbitrage Pricing Theory (APT) model is a financial theory that estimates the expected return on an asset based on the asset's sensitivity to various macroeconomic factors. It is used to determine the fair price of an asset by considering multiple factors that could affect its return, as opposed to relying on a single market index as in the Capital Asset Pricing Model (CAPM).

# Example Knowledge Checklist Question (VISUALPUZZLES)

- Question: What is the definition of distance in a geometric context?
- Answer: Distance in a geometric context refers to the measurement of space between two points.

# E.3 Knowledge Checklist Human Annotation

We asked two human annotators to manually verify and correct the knowledge checklist questions and gave them the following instructions. The inter-annotator agreement rate is $87.8\%$.

# Human Annotation Instructions

You are given a json file, where each item contains the following elements:

- Question: a multiple-choice question.
- Answer: the answer to the question with an optional explanation.
- Knowledge Concept Checklist: a list of question-answer pairs, where each question in the list is intended to represent a distinct knowledge concept necessary for solving the Question.

Your task is to annotate the knowledge concept checklists generated by a model. You should carefully evaluate each question-answer pair based on the following criteria:

1. Necessity: Is the question genuinely necessary for solving the problem? If not, then remove the question.
2. Repetition: Check if any questions are repetitive or duplicate existing questions within the list. If the question is repetitive or a duplicate, then remove the question.
3. Completeness: Ensure no critical knowledge concepts required to solve the problem are missing, and identify if any additional important questions should have been included.
4. Correctness: Verify whether the provided answers are accurate. Revise the checklist in case of incorrect checklist QA pairs.
5. Knowledge vs. Skills: Ensure each question explicitly evaluates a knowledge concept rather than testing skills or problem-solving techniques. Remove any questions that primarily evaluate skills instead of knowledge.

# F Reasoning Complexity

# Instruction Prompt to Solve Questions in Detailed Steps

<image>

Solve this question with First Order Logic. Write out each thinking step explicitly, do not skip steps. In your response, begin each step with ____STEP_START__.

step <step_num>

# G Comparison with Other Benchmarks

Figure 9 provides a comparative analysis between VISUALPUZZLES and several widely-used benchmarks for multimodal reasoning, visualizing the knowledge requirement and reasoning complexity of each benchmark. VISUALPUZZLES has high reasoning complexity and low knowledge requirement, with the aim of disentangling multimodal reasoning from domain-specific knowledge to evaluate general reasoning abilities in non-expert settings.
| Dataset | Size | Reasoning Load | Knowledge Requirement | % Easy Words | Question Type | Answer Type |
| --- | --- | --- | --- | --- | --- | --- |
| LogiQA | 0.7K | Heavy | Light | 52.0 | Text | Text |
| GSM8K | 8.5K | Heavy | Heavy | 54.0 | Text | Text |
| WikiDiverse | 0.8K | Light | Heavy | 35.8 | Image+Text | Text |
| MathVista | 6.1K | Heavy | Heavy | 51.9 | Image+Text | Text |
| MMMU | 11.5K | Heavy | Heavy | 46.4 | Image+Text | Text |
| MATH-Vision | 3.0K | Heavy | Heavy | 53.8 | Image+Text | Image+Text |
| MathVerse | 2.6K | Heavy | Heavy | 38.2 | Image+Text | Text |
| LogicBench | 1.5K | Heavy | Light | 53.6 | Text | Text |
| LogicVista | 0.4K | Heavy | Heavy | 41.2 | Image+Text | Image |
| NaturalBench | 10K | Light | Light | 52.5 | Image+Text | Text |
| VISUALPUZZLES | 1.2K | Heavy | Light | 54.1 | Image+Text | Image+Text |
Table 9: Comparison of existing benchmarks with VISUALPUZZLES.

![](images/5a9945fb2950cbab9e5972ec413e60b62a9b0274be1ab32e1b5d8735f9bb79f7.jpg)
Figure 9: Comparison between VISUALPUZZLES and several widely-used benchmarks.

Table 10 compares the performance of various model families across MathVista, MMMU, and VISUALPUZZLES. Both MathVista and MMMU place a heavy emphasis on knowledge as well as reasoning, whereas VISUALPUZZLES assesses models on domain-disentangled multimodal reasoning alone. We find that success on knowledge-intensive multimodal reasoning benchmarks such as MathVista and MMMU does not always carry over to VISUALPUZZLES, which emphasizes reasoning rather than extensive pre-trained knowledge.

# H Additional Analysis

# H.1 Proprietary vs. Open Models

From Table 2, proprietary models (e.g., o4-mini and Claude-3.7-Sonnet) consistently achieve higher overall accuracy than most open-source models on VISUALPUZZLES. However, some open models show competitive or even higher performance in both overall accuracy and specific reasoning categories. For instance, Qwen2.5-VL-72B-Instruct outperforms GPT-4o on algorithmic reasoning, deductive reasoning, spatial reasoning, and overall accuracy. This indicates that while proprietary models currently lead, open models are rapidly improving in multimodal reasoning capabilities.

# H.2 Reasoning Category and Difficulty Levels

Figure 10 and Figure 11 present complementary views of human accuracy against three representative models: o1 (one of the best-performing proprietary models), Qwen2.5-VL-72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model). Specifically, Figure 10 compares performance across difficulty levels for each reasoning category, while Figure 11 compares performance across categories within each difficulty level.
| Model | MathVista | MMMU | VISUALPUZZLES |
| --- | --- | --- | --- |
| Human | 60.3 | 88.6 | 80.1 |
| o1 | 73.9 | 78.2 | 51.8 |
| GPT-4o | 63.8 | 69.1 | 41.1 |
| Gemini-2.0-Flash | - | 71.7 | 45.0 |
| Gemini-1.5-Pro | 63.9 | 62.2 | 45.4 |
| Claude-3.5-Sonnet | 67.7 | 68.3 | 42.4 |
| Claude-3.7-Sonnet | - | 71.8 | 48.3 |
| Claude-3.7-Sonnet (Thinking) | - | 75.0 | 48.3 |
| LLaVA-1.5-7B | - | 36.2 | 26.9 |
| LLaVA-1.5-13B | 27.6 | 36.4 | 27.6 |
| LLaVA-NeXT-7B | 35.8 | 34.6 | 27.4 |
| LLaVA-NeXT-13B | 36.2 | 35.3 | 25.3 |
| LLaVA-NeXT-34B | 46.5 | 51.1 | 29.8 |
| LLaVA-OV-0.5B | 34.8 | 31.4 | 27.2 |
| LLaVA-OV-7B | 63.2 | 48.8 | 29.4 |
| LLaVA-OV-72B | 67.5 | 56.8 | 31.8 |
| Llama-3.2-11B-Vision-Instruct | 51.5 | 50.7 | 29.4 |
| Llama-3.2-90B-Vision-Instruct | 57.3 | 60.3 | 34.3 |
| Qwen2-VL-72B | 70.5 | 64.5 | 32.1 |
| QvQ-72B-Preview | 71.4 | 70.3 | 37.9 |
| Qwen2-VL-2B-Instruct | 43.0 | 41.1 | 31.3 |
| Qwen2-VL-7B-Instruct | 58.2 | 54.1 | 30.2 |
| Qwen2-VL-72B-Instruct | 70.5 | 64.5 | 34.9 |
| Qwen2.5-VL-3B-Instruct | 62.3 | 53.1 | 31.2 |
| Qwen2.5-VL-7B-Instruct | 68.2 | 58.6 | 33.7 |
| Qwen2.5-VL-72B-Instruct | 74.8 | 70.2 | 42.3 |
| Cambrian-8B | 49.0 | 42.7 | 28.5 |
| Cambrian-13B | 48.0 | 40.0 | 27.4 |
Table 10: Comparison of human and SOTA model performance on MathVista, MMMU, and VISUALPUZZLES.

Humans consistently outperform all models across categories and difficulty levels, often by large margins. Notably, human performance remains high and relatively stable in the algorithmic, deductive, and spatial categories, even on hard questions. While accuracy does decline in analogical and inductive reasoning as difficulty increases, humans still maintain a clear advantage over models.

In contrast, model performance declines sharply as difficulty increases, especially for open-source models. The accuracy of Llama-3.2-90B-Vision-Instruct on hard analogical tasks drops to just $10\%$. Even one of the strongest proprietary models, o1, while more robust, still lags significantly behind humans, particularly on analogical, inductive, and spatial tasks. On easy tasks, some models perform competitively in certain categories, but this advantage largely disappears on medium and hard questions.

Interestingly, these models maintain generally stable performance on algorithmic and deductive reasoning. For o1 and Qwen2.5-VL-72B-Instruct, performance on algorithmic reasoning even improves on more difficult tasks, whereas human performance degrades as the difficulty level increases. However, all models, including o1, perform worst at analogical, inductive, and spatial reasoning in general, especially as the difficulty level increases. This suggests that models are relatively better at tasks requiring structured, rule-based algorithmic processing, while their performance degrades more steeply on tasks requiring relational abstraction (analogical), pattern induction (inductive), and visual understanding (spatial), particularly as the difficulty level increases. In summary, these results indicate that while some models exhibit promising performance on structured and easier reasoning tasks, multimodal models still struggle with abstract and complex reasoning, particularly when difficulty increases. Bridging the gap between model and human reasoning remains a critical challenge.

![](images/e07d22a4f5fa74094ba8126af08a759927de508d0089c6af0dc1d22ad43d3d84.jpg)

![](images/2eb08ceca733cd8c85e3edca9cf46721d82bbaf628b9e06926a75a16c7a708b2.jpg)

![](images/c5e2c4db744c3657fe1cb4531ced0aa2d78982d0bb5fcdc58270986c6c22c141.jpg)

![](images/70f13531e69835c70feace1f0c5d4cb9f407929b98d21c7bf77f8b7957c853c2.jpg)

![](images/dc9f11d9c6ca0344d00277395624f99e2b3066e6ca555339dae6e6a396995963.jpg)
Figure 10: Comparison of accuracy across different reasoning categories for human participants, one of the best-performing proprietary models (o1), the best-performing Qwen-based open model (Qwen2.5-VL-72B-Instruct), and the best-performing Llama-based open model (Llama-3.2-90B-Vision-Instruct), measured across difficulty levels.

![](images/26d9b479b586179672f0a391c3433b4482c8e8deeca5515d3b7db271c2635940.jpg)

![](images/2df83bc62266ef79c8c7d72021db90c5bdc93adf7f3d635def5ee2e1de1692b1.jpg)

![](images/932cbaf51c205a9c980fb43076c9fd7e28519b492074fde5af65adb7c67653ef.jpg)
Figure 11: Comparison of accuracy across different difficulty levels for human participants, one of the best-performing proprietary models (o1), the best-performing Qwen-based open model (Qwen2.5-VL-72B-Instruct), and the best-performing Llama-based open model (Llama-3.2-90B-Vision-Instruct), measured across reasoning categories.
# H.3 Option Types and Difficulty Levels

Figure 12 compares human accuracy against three representative models, o1 (one of the best-performing proprietary models), Qwen2.5-VL-72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model), across different difficulty levels, separately for textual and visual answer options.

Across all participants and models, we observe a consistent pattern: text-based options result in higher accuracy than image-based options, with the performance gap widening as task difficulty increases. This trend holds even for human participants, whose accuracy drops from $92\%$ to $40\%$ on visual options when moving from easy to hard tasks, compared to a much smaller drop on text-based ones ($93\%$ to $73\%$).

For models, the gap is even more pronounced. For instance, Qwen2.5-VL-72B-Instruct achieves $58\%$ accuracy on hard questions with text options, but only $20\%$ when image options are used. o1 and Llama-3.2-90B-Vision-Instruct exhibit similar drops, suggesting a broad weakness in multi-image reasoning and visual option discrimination. These findings suggest that image-based answer options introduce significant additional complexity, requiring models not just to understand the question but to reason over multiple visual cues. This capability is essential for real-world tasks such as product selection, recommendation, and visual planning, where decision-making often depends on comparing visual content.

![](images/3737d6ab696a3c310dab5c98fb390eedc23b20fda6553c6b5310acdd5f1eabeb.jpg)

![](images/60a6175bfd05a6f7af089e7517aaca78aec0f7e4c49b2736be538da2e6e2dcfa.jpg)
Llama-3.2-90B-Vision-Instruct

![](images/89b0feaa5116c74ddcd31055f2b342bfdeeb7a6e14a35f8811b249750b39baac.jpg)
Qwen2.5-VL-72B-Instruct

![](images/ece4b18303218047608cad7c13202f459437b64ddffa0465d71243302f485618.jpg)
Figure 12: Comparison of accuracy across different difficulty levels for human participants, one of the best-performing proprietary models (o1), the best-performing Qwen-based open model (Qwen2.5-VL-72B-Instruct), and the best-performing Llama-based open model (Llama-3.2-90B-Vision-Instruct), measured on textual vs. visual option types.

However, most pretraining datasets and benchmarks have traditionally emphasized textual QA formats, with far fewer examples involving visual options or structured visual comparisons. As a result, models may lack the inductive bias or learned attention mechanisms needed to handle visual alternatives effectively. These results highlight an important direction for future work: expanding and diversifying training corpora to include multi-choice visual reasoning tasks, and developing architectures that are explicitly designed to process and compare visual candidates, especially under challenging conditions.

# H.4 Case Study of Reasoning

Figure 13 shows a case study demonstrating the similarity in structure and reasoning strategy between Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking. The average textual similarity between these two models' responses on VISUALPUZZLES is 0.9.

# H.5 Impact of CoT

Table 11 compares model performance under two prompting strategies: a direct multiple-choice prompt vs. a Chain-of-Thought (CoT) prompt. We observe that proprietary models and larger open models ($\geq 72\mathrm{B}$) benefit from CoT, while others show little to no improvement or even a decline in performance with CoT.
For instance, both GPT-4o and Qwen2.5-VL-72B-Instruct show more than $20\%$ increases in performance when using CoT. In contrast, several smaller models, such as Qwen2-VL-2B-Instruct and Cambrian-13B, exhibit decreased accuracy with CoT prompting. These results suggest that CoT can indeed enhance the reasoning capability of larger models, whereas it may introduce unnecessary complexity or confusion for smaller models, thus decreasing their performance.
| Model | Direct | CoT |
| --- | --- | --- |
| GPT-4o | 34.0 | 41.6 |
| Gemini-1.5-Pro | 41.0 | 45.1 |
| Claude-3.5-Sonnet | 40.0 | 42.5 |
| Qwen2-VL-2B-Instruct | 31.3 | 26.1 |
| Qwen2.5-VL-7B-Instruct | 33.7 | 32.0 |
| Cambrian-13B | 27.4 | 26.5 |
| LLaVA-NeXT-34B | 29.8 | 29.6 |
| Qwen2.5-VL-72B-Instruct | 38.6 | 42.3 |
| Llama-3.2-90B-Vision-Instruct | 33.3 | 33.9 |
+ +Table 11: Comparison of models with Direct Multiple Choice and CoT prompting. + +
Question: Given a pair of chairs arranged at a specific angle, select the option from the four provided that matches the angle of the chairs in the question. (Answer: C)

Claude-3.7-Sonnet (With Thinking):
Let me analyze this problem carefully.
The question shows a "question pair" of chairs at the top of the image, and I need to find which of the four options (A, B, C, D) matches the angle arrangement of those chairs.
Let me examine the question pair first:
- The question pair shows two folding chairs that appear to be facing inward toward each other at approximately a 90-degree angle (the chairs are at roughly right angles to each other).
- The chairs in the question pair are shown separated by a colon ":" symbol.
Now let me examine each option:
Option A:
- Shows two simple chairs with backrests
- The chairs are facing each other at approximately a 90-degree angle
- The arrangement looks similar to the question pair in terms of the angle
Option B:
- Shows two chairs with different designs - one appears to be a more angled/tilted chair and the other is more upright
- They don't appear to be at the same angle relationship as the question pair
Option C:
- Shows two office/drafting chairs with wheels and height adjustment
- The chairs appear to be facing each other at approximately a 90-degree angle
- The arrangement looks similar to the question pair in terms of angle
Option D:
- Shows two bar-style chairs with round seats and central pedestals
- The chairs appear to be directly facing each other (180-degree relationship)
- This doesn't match the angle relationship in the question pair
Based on the angle relationship, both Options A and C show chairs at approximately a 90-degree angle to each other, similar to the question pair. However, Option A's chair style and arrangement seems closer to the question pair's folding chairs.
So, looking at the angle between the chairs, which is the key aspect of this question, Options A and C both show approximately 90-degree angles between chairs, while Option B shows chairs at different angles, and Option D shows chairs directly facing each other. Since the question specifically asks about the angle match, I need to determine which option best matches the exact angle of the question pair.
Looking closely again:
The question pair shows chairs that are facing slightly inward, at approximately 90 degrees.
Option A shows a similar angle relationship of roughly 90 degrees.
Option C also shows a similar angle relationship of roughly 90 degrees.
This is a bit challenging with the 2D representation, but I think Option A most closely matches the angle relationship in the question pair.
Answer: A

Claude-3.7-Sonnet (Without Thinking):
I need to analyze the angle relationship between the pair of chairs in the question and compare it with the four options provided.
Looking at the question image at the top:
- The first chair (on the left) is shown at an angle, with its side profile partially visible
- The second chair (on the right) is shown more from the front, facing more directly toward the viewer
- The chairs are positioned at roughly a 90-degree angle to each other
Now examining each option:
Option A: Two simple chairs with backs. The left chair is shown from a side angle, while the right chair is shown more from the front. Their orientation appears to be at approximately a 90-degree angle to each other.
Option B: Two backless chairs/stools. The left chair has an angled design and is shown from the side, while the right chair is shown more straight-on. The angle between them doesn't appear to match the 90-degree relationship in the question.
Option C: Two office/drafting chairs with wheels. Both chairs are shown from similar angles, with only slight variation in their orientation. They don't appear to have the same 90-degree relationship as the question chairs.
Option D: Two bar stools with round seats and circular bases. Both are shown from similar angles with minimal difference in orientation. They don't demonstrate the 90-degree relationship seen in the question.
Comparing all options to the question image, Option A most closely matches the angular relationship between the two chairs in the question.
Answer: A
Figure 13: Case study showing the similarity in structure and reasoning strategy between Claude-3.7-Sonnet-Thinking and Claude-3.7-Sonnet. The similarity between the responses of these two models on VISUALPUZZLES is 0.9.

# I Case Study

# Algorithmic (Easy)

![](images/424714a384e3ef3140a976266073aef801742fb243abbc70eecfcc5dd6030446.jpg)

![](images/6ff5da0dfd71ea7192bbc4b256b148718f303bd5f12d4f39bba8c9a93445e556.jpg)
Figure 14: A sample error case of Algorithmic Reasoning (difficulty: Easy).

Question: There are seven squares in the picture. How many more triangles than squares are there in the picture?

Options: (A) 1 (B) 2 (C) 3 (D) 4

Ground Truth: (C)

Claude-3.7-Sonnet-Thinking: I need to count the number of squares and triangles in the given picture, and then find how many more triangles there are than squares.

First, let me count the squares:

1. I can see 7 small squares arranged in a triangular pattern within the large triangle.

Now, let me count the triangles:

1. There's the large outer triangle that contains everything.

2. There are small triangles formed by the division of the large triangle by the squares.

Let me count these small triangles carefully:
- Along the bottom row, I see 4 triangles (formed by the squares and the bottom edge)
- On the left side of the large triangle, I see 3 triangles (formed by the squares and the left edge)
- On the right side of the large triangle, I see 3 triangles (formed by the squares and the right edge)
- Don't forget the large outer triangle itself: 1 triangle

So in total: $4 + 3 + 3 + 1 = 11$ triangles

Now I can find the difference:

Number of triangles - Number of squares $= 11 - 7 = 4$

So there are 4 more triangles than squares in the picture.

Looking at the options, the answer is (D) 4.

Looking at the image, I need to count the total number of squares and triangles, then find their difference.

First, I'll count the squares:

- I can see 7 small squares arranged in a triangular pattern inside the large triangle.

Next, I'll count the triangles:

1. There is the large outer triangle that contains everything.
2. Along the bottom edge of the large triangle, there are 4 small triangles.
3. Along the left edge of the large triangle, there are 3 small triangles.
4. Along the right edge of the large triangle, there are 3 small triangles.

Total number of triangles $= 1 + 4 + 3 + 3 = 11$ triangles

Now I can calculate how many more triangles than squares there are:

11 triangles - 7 squares = 4 more triangles

Answer: D

Error Category: Reasoning Error

Error Reason: The model gets the answer wrong because it miscounts the number of triangles, specifically double-counting the triangles formed by small triangles and squares. The response incorrectly concludes there are 11 triangles, but a careful manual inspection shows only 10 distinct triangles, making the correct difference 3, not 4.
+ +# Algorithmic (Easy) + +![](images/61f9deaf7571114247539cebe087ca5c23f67840862b57d8b656849458a44f08.jpg) + +![](images/7461112c409fd02eba1473e6933495500624a55d6357cb99ddfdfec1549ea9f6.jpg) +Figure 15: A sample correct case of Algorithmic Reasoning (difficulty: Easy). + +![](images/fb31c8ae8c6c1b5b7fe9668564add2294271e6b74599c26aa0c6c7eea16db2e7.jpg) +(B) + +![](images/2d3a21ad802b33bf71c70e0abd227d85a77a0230e56bf9bc88272a39454b0e56.jpg) + +![](images/b3960c57aa37463f6a6aae7ee509937f01607f72a797a14974a5afe562bad84c.jpg) +(D) + +Question: In the Kangaroo constellation, all stars have a number greater than 3 and their sum is 20. Which option shows the Kangaroo constellation? + +Ground Truth: (B) + +Claude-3.7-Sonnet-Thinking: The question asks about the Kangaroo constellation, which has the following properties: + +1. All stars have a number greater than 3 +2. The sum of all numbers is 20 + +Let me check each option to see which one satisfies these conditions: + +# Option (A): + +Stars have values: 5, 4, 7, 6 + +- Are all numbers greater than 3? Yes (5, 4, 7, 6 are all $>3$ ) +- Sum: $5 + 4 + 7 + 6 = {22}$ + +This doesn't match the required sum of 20. + +# Option (B): + +Stars have values: 5, 8, 7 +- Are all numbers greater than 3? Yes (5, 8, 7 are all $>3$ ) +- Sum: $5 + 8 + 7 = {20}$ + +This matches both conditions: all numbers are greater than 3, and they sum to 20. + +# Option (C): + +Stars have values: 5, 3, 7, 2, 8 + +- Are all numbers greater than 3? No (3 and 2 are not $>3$ ) +- Sum: $5 + 3 + 7 + 2 + 8 = {25}$ + +This doesn't satisfy the first condition about all stars having numbers greater than 3. + +# Option (D): + +Stars have values: 5, 1, 4, 9 + +- Are all numbers greater than 3? No (1 is not $>3$ ) +- Sum: $5 + 1 + 4 + 9 = {19}$ +This doesn't satisfy either condition. + +So, only Option (B) satisfies both conditions of the Kangaroo constellation. + +# + +Solving the Kangaroo Constellation Problem + +I need to identify the constellation where all stars have numbers greater than 3 and sum to 20. + +Let me examine each option: + +# Option (A) + +Stars: 5, 4, 7, 6 + +- All numbers $> 3$ ? Yes +- Sum: $5 + 4 + 7 + 6 = {22}$ + +Doesn't match the required sum of 20. + +# Option (B) + +Stars: 5, 8, 7 + +- All numbers $> 3$ ? Yes + +- Sum: $5 + 8 + 7 = {20}$ + +Matches both requirements! + +# Option (C) + +Stars: 5, 3, 7, 2, 8 + +- All numbers $> 3$ ? No (3 and 2 are $\leq 3$ ) +- Sum: $5 + 3 + 7 + 2 + 8 = 25$ + +Doesn't satisfy the first condition. + +# Option (D) + +Stars: 5, 1, 4, 9 + +- All numbers $> 3$ ? No (1 is $\leq 3$ ) + +- Sum: $5 + 1 + 4 + 9 = {19}$ + +Doesn't satisfy either condition. + +Only option (B) satisfies both requirements: all stars have numbers greater than 3, and their sum is exactly 20. + +Answer: B + +# Algorithmic (Medium) + +![](images/00e50a1568abcd54465fc320e5e371cbde25a42d012dacc8fef4c74e1687b8cd.jpg) + +![](images/33212890af4e16181bfc285e43be24966ae928823fec25db8cb92706b2348fe9.jpg) +Figure 16: A sample error case of Algorithmic Reasoning (difficulty: Medium). + +Question: How many left hands appear in the image? + +Options: (A) 1 (B) 8 (C) 4 (D) 5 + +Ground Truth: (C) + +# Claude-3.7-Sonnet-Thinking: + +Looking at the image, I can see approximately 10-12 hand icons/emoji in a row. They're all yellow (with one that appears to be skin-colored), and they show various hand gestures. + +To determine which ones are left hands, I need to look at the thumb position. 
In a left hand, the thumb would typically be on the right side when the palm is facing forward. + +Let me go through each hand from left to right: + +1. Thumbs down gesture - appears to be a left hand with thumb on right side +2. Fist or closed hand - orientation is harder to determine but seems to be a left hand +3. Open hand or waving hand - appears to be a left hand +4. Hand pointing or gesturing - appears to be a left hand +5. Open hand with fingers spread - appears to be a left hand +6. Fist with extended thumb (like "thumbs up" but horizontal) - appears to be a right hand (thumb on left) +7. Open hand with fingers spread - appears to be a left hand +8. Hand making "OK" gesture - appears to be a left hand +9. Victory/peace sign hand - appears to be a left hand +10. Ear or side of face (not a hand) +11. Hand pointing or gesturing - appears to be a right hand + +So counting the left hands, I see approximately 8 left hands. This would correspond to option (B) 8. + +However, I want to be careful because determining left vs. right can sometimes be ambiguous in emoji or simplistic hand representations. Let me double check my count. + +Looking again, I see the thumbs down, several open hands, some gesture hands, and a few that are definitely left hands based on thumb position. The total count of left hands appears to be around 8, though there's some ambiguity with a couple of the representations. + +Based on my analysis, the answer appears to be (B) 8. + + + +To solve this question, I need to examine the image carefully and identify which hands are left hands. + +Looking at the row of yellow hand emojis, I need to determine whether each is a left or right hand. For hand emojis, I can identify a left hand by noticing the thumb position - on a left hand, the thumb will typically appear on the right side when the palm is facing forward. + +Going through each emoji from left to right: + +1. Thumbs down - this is a left hand (thumb is on the right) +2. Fist - appears to be a left hand +3. Waving hand - this is a left hand +4. Pointing hand - this is a left hand +5. Open hand with fingers spread - this is a left hand +6. Fist with extended thumb - this appears to be a right hand (thumb on left) +7. Open hand - this is a left hand +8. OK gesture hand - this is a left hand +9. Peace sign hand - this is a left hand +10. Ear emoji (not a hand) +11. Pointing gesture - right hand + +I count 8 left hands total in the image. + +Answer: B + +# Error Category: Spatial / Orientation Error + +Error Reason: The model makes a orientation error by misinterpreting the thumb positions in several hand emojis, leading it to incorrectly classify some right hands as left hands. For example, it counts the pointing hand in the third emoji (which is clearly a right hand) as a left hand. This misclassification results in an overcount of left hands. + +# Algorithmic (Medium) + +![](images/1ab5d39c309739928dbdbad3db6a7908d5d62c04378b3c3dfc2525e0114c5e01.jpg) +Figure 17: A sample correct case of Algorithmic Reasoning (difficulty: Medium). + +Question: The entire pie seen in the image is divided among several children. Each child receives a piece of pie, and each piece of pie looks identical. What is the maximum possible number of children there? Options: (A) 9 (B) 8 (C) 10 (D) 4 + +Ground Truth: (B) + +Claude-3.7-Sonnet-Thinking: I need to determine how many equal-sized pieces this pie can be divided into. 
Since each child will receive an identical piece, I need to count the maximum number of equal portions visible in the image. + +Looking at the image, I can see that this appears to be an unusual-looking pie with different colored sections arranged in a circular pattern. It's not a typical pie with simple wedge-shaped slices. + +From what I can observe, there appears to be a pattern of colored shapes (green, light green, blue, and red) arranged in a circle. To determine how many equal pieces there are, I need to identify the repeating pattern and count how many times it repeats around the circle. + +The pie seems to have a symmetrical arrangement. Looking carefully, I can see that there appear to be 8 sections around the outer ring that follow a similar pattern. I can see repeated patterns of green and light green shapes with blue and red accents between them. + +Looking at the options: + +(A) 9 +(B) 8 +(C) 10 +(D) 4 + +Based on my observation of the image, there appear to be 8 similar sections arranged around the circle. This would suggest that option (B) 8 is the correct answer. + +However, I should double-check by looking for any other patterns or symmetries in the image. It's possible that each section could be further divided, or that there's a different way to interpret the equal pieces. + +After careful examination, I'm confident that there are 8 identical sections arranged in a circular pattern. Therefore, the maximum number of children who could receive identical pieces of pie would be 8. + + + +Determining the Maximum Number of Children + +To solve this problem, I need to examine the pie in the image and determine how many identical pieces it can be divided into. + +Analysis of the Pie + +Looking at the image, I can see a circular pie with a pattern of colored sections. The key is to identify how many identical sections are present or could be created. + +Upon careful examination, I notice that the pie has a symmetrical arrangement with a repeating pattern. The colorful segments (green, light green, red, and blue shapes) appear to form a consistent pattern around the circle. + +Counting these repeating sections carefully, I can identify 8 identical portions around the circumference of the pie. Each of these sections follows the same pattern of colored shapes. + +Conclusion + +Since each child must receive an identical piece, and there are 8 identical sections visible in the pie, the maximum possible number of children would be 8. + +Answer: B + +# Algorithmic (Hard) + +![](images/927d08a0f0f6c9898c84461b46688bcabf55b744090097079b653255cfc913a2.jpg) + +![](images/090c5d20e7dbcee00cc16fc3ac0259c5d9be49d302ec9a733870fb9c076fd3e8.jpg) +Figure 18: A sample error case of Algorithmic Reasoning (difficulty: Hard). + +Question: There are 53 residents living in the apartment. How many people live on the second floor? + +Options: (A) 5 (B) 8 (C) 10 (D) 13 + +Ground Truth: (A) + +Claude-3.7-Sonnet-Thinking: This image shows an apartment building with different sections labeled with numbers. Let me analyze what the numbers represent: + +- The building appears to have multiple floors. +- The number 27 is shown on the left side of one section (blue). +- The number 23 is shown on the right side of a section (also seems to cover two blue floors). +- The number 18 is shown on the left side of a pink/red section marked with "?" +- The number 13 is shown on the right side of the bottom floor. 
+ +The question asks how many people live on the second floor, but I need to determine which floor is the second floor and how many people live there. + +Looking at the building from bottom to top: + +1. Bottom floor (blue with windows): labeled with 13 on the right +2. Second floor (pink/red with "?"): labeled with 18 on the left +3. Two middle floors (blue with windows): part of the 23 on the right +4. Top floor (blue with windows): labeled with 27 on the left + +So it seems the second floor from the bottom is the one marked with $?^{\prime \prime}$ and has 18 on the left side. But the question asks specifically how many people live on the second floor, not how many are associated with the label. + +Let me think about this more carefully. The total number of residents is 53. The numbers shown might be: + +- 27: Number of people on the top floor +- 23: Number of people on the two middle floors combined +- 18: Number of people on the second floor +- 13: Number of people on the bottom floor + +Let's check if these add up: $27 + 23 + 18 + 13 = 81$ , which is more than 53. + +Wait, I think I'm misinterpreting. The numbers might represent: + +- 27: People on left side (top section) +- 23: People on right side (upper two floors) +- 18: People on left side (lower section) +- 13: People on right side (bottom floor) + +So $27 + 18 = 45$ people on the left, and $23 + 13 = 36$ people on the right. That's still $45 + 36 = 81$ people, which doesn't match 53. + +Let me try a different approach. Maybe the 23 represents the number of people on the right side of the building (covering multiple floors), and 18 represents people on another part. + +Actually, looking at the diagram again, it appears that: + +- 27: Left side of the top section +- 23: Right side of the top two sections +- 18: Left side of the bottom section +- 13: Right side of the bottom section + +Given the total is 53 residents, and we're trying to find the number on the second floor, let's try to interpret the diagram differently. + +The first (bottom) floor seems to have 13 people. + +The top appears to have 27 people. + +That leaves $53 - 13 - 27 = 13$ people for the second and third floors combined. + +I'm still uncertain, but based on my calculations and the available options, it seems like (D) 13 might be the answer if we interpret the second floor as having all the remaining residents after accounting for the top and bottom floors. + +Solving the Apartment Residents Problem + +Looking at the image, I need to determine how many residents live on the second floor of this apartment building. + +I can see a diagram of a building with several numbers: + +- The top section has "27" on the left side +- The middle two floors have "23" on the right side +- There's a section marked with "18" on the left. +- The bottom floor has "13" on the right side +- The second floor from the bottom is marked with a "?" - this is what I need to find + +I notice the question states there are 53 residents total living in the apartment building. + +Let me work this out: + +- The top floor appears to have 27 residents +- The bottom floor appears to have 13 residents +- That means the middle two floors together must have: ${53} - {27} - {13} = {13}$ residents + +Looking closely at the image, the second floor is specifically marked with a "?" and there are numbers on the sides indicating 13 for that floor. + +Therefore, the second floor has 13 residents. 
+ +Answer: D + +Error Category: Understanding Error + +Error Reason: The model makes an image understanding error by misunderstanding what is the meaning of the brackets and numbers, despite all the branching and revalidation. This misunderstanding causes it to iteratively try to understand the meaning of the numbers and ultimately select the wrong answer. + +# Algorithmic (Hard) + +![](images/734da95c949dd5902eb030ed4246a550a978e842247fed2f79fc705914fc1123.jpg) + +![](images/85d2f65accb24a4a7760e618083811214a75b10243babb7aedceef3ce06aab19.jpg) +Figure 19: A sample correct case of Algorithmic Reasoning (difficulty: Hard). + +Question: A bird can only move horizontally or vertically towards a monkey in the grid. The number of ways that the bird can reach the monkey is: + +Options: (A) 15 (B) 14 (C) 18 (D) 12 + +Ground Truth: (A) + +Claude-3.7-Sonnet-Thinking: Looking at the grid, I need to identify the positions of the bird and the monkey. + +The bird appears to be at the top of the grid in the second column, specifically at position $(0,1)$ if we use a zero-indexed grid counting from the top-left. + +The monkey appears to be at the bottom-right of the grid, at position $(4,3)$ using the same coordinate system. + +To count the number of ways the bird can reach the monkey, I need to consider that the bird can only move horizontally or vertically. This means the bird can only move right or down to reach the monkey from its starting position. + +The total number of valid paths will be the number of ways to arrange the required horizontal and vertical moves. + +From position $(0,1)$ to $(4,3)$ , the bird needs to move: + +- 4 steps down (from row 0 to row 4) +- 2 steps right (from column 1 to column 3) + +So in total, the bird needs to make $4 + 2 = 6$ moves, with 4 being vertical and 2 being horizontal. + +The number of ways to arrange these moves is given by the combination formula: + +C(total moves, horizontal moves) = C(6, 2) = 6! / (2! * 4!) = (6 * 5) / (2 * 1) = 30 / 2 = 15 + +Alternatively, you can calculate $C(6, 4)$ which would give the same result: + +$$ +C (6, 4) = 6! / (4! * 2!) = 1 5 +$$ + +So there are 15 different ways for the bird to reach the monkey. + +Looking at the options, this corresponds to answer (A) 15. + + + +Solving the Grid Path Problem + +In this problem, I need to find the number of ways that a bird can reach a monkey by moving only horizontally or vertically in a grid. + +Looking at the image: + +- The bird is located at the top of the grid in the second column (position (0,1) if we start counting from 0) +- The monkey is located at the bottom-right area (position (4,3)) + +Step 1: Determine the required moves + +From the bird to the monkey, the bird needs to move: +- 4 steps down (from row 0 to row 4) +- 2 steps right (from column 1 to column 3) + +So the bird must make a total of 6 moves: 4 vertical and 2 horizontal. + +## Step 2: Calculate the number of possible paths + +This is a classic combinatorial problem. The number of different ways to arrange these moves is: + +C(total moves, horizontal moves) = C(6,2) = 6! / (2! $\times$ 4!) + +$$ +\begin{array}{l} = (6 \times 5) / (2 \times 1) \\ = 3 0 / 2 \\ = 1 5 \\ \end{array} +$$ + +Alternatively, we could calculate $\mathrm{C}(6,4)$ which gives the same result. + +## Step 3: Verify with the given options + +Our answer of 15 matches option (A). 
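As a quick check of this count, a minimal Python sketch (added for reference; the start position (0, 1) and goal (4, 3) are the coordinates read off above) confirms that the closed form and a direct enumeration both give 15:

```python
from math import comb

# The bird must make 4 downward moves and 2 rightward moves, in some order.
print(comb(6, 2))  # 15

# Direct enumeration of the monotone paths gives the same count.
def count_paths(pos, goal):
    if pos == goal:
        return 1
    row, col = pos
    total = 0
    if row < goal[0]:   # step down
        total += count_paths((row + 1, col), goal)
    if col < goal[1]:   # step right
        total += count_paths((row, col + 1), goal)
    return total

print(count_paths((0, 1), (4, 3)))  # 15
```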
+ +Answer: A + +# Analogical (Easy) + +![](images/e518d4acbe9c161cd715fed617ffb95611fdb16cf38907210ed5d5f595521048.jpg) + +![](images/50a2de794b432270d8a6b8a4ca03979f06543861e5246d77de2f2322303eec57.jpg) +Figure 20: A sample error case of Analogical Reasoning (difficulty: Easy). + +![](images/a77154c58642491a23c268c4034f10ad4af28ba62a0d416a6bd8022d2dd274b2.jpg) + +![](images/cb35d65666a6596b675ccdc24c9b37f588de9281849111d31cf59dc8561dd8a1.jpg) + +![](images/22e8f2fd45414ea0c02565acbba730dd18db188566730e74d38f7073ac825006.jpg) + +![](images/2c357401c5509f0dfd36a58990216a6180a467958210780c06b3d61a2f2e3160.jpg) + +![](images/37364e6ffedfd53d5cf11984921f5c210b210bd629162fe84fa6477d697c9de7.jpg) + +![](images/0920247e737549ac6c013dd4ad786a1d0456780992c80a82459f78cbac53edd4.jpg) + +![](images/df192506b0a870a0c7543522cc6648f7a5cebc30bdfbff972c7e7ef764491a13.jpg) + +![](images/27c1ab9e19183598421b47bc2b2cde9cfa0eca5758064122469fefc4dd9aa1ec.jpg) + +![](images/b9bd0cb26aec3f688bb02c915e6e3c6a947257aafe514b16d19b7342827dc8a7.jpg) + +![](images/eb994d835f7c12f06003e9f62bc37518d1ff14925e962e33d938b47b4b9b5b19.jpg) + +![](images/d16e3f610ac02e622ac34890433e4efd1957e013f904088e7fb2cd385a5816c5.jpg) + +Question: Given the pattern in the first line of blocks, which option at the bottom of the image fits in the question mark in the second line of blocks? + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the first line of blocks and see how it relates to the second line of blocks. + +First line of blocks: + +1. The first block has white on the left and blue on the right + +2. The second block has white on the top and blue on the + +3. The third block has blue on the left and white on the right + +Second line of blocks: + +1. The first block has black on the top and yellow on the bottom +2. The second block has yellow on the left and black on the right +3. The third block has a question mark (this is what we need to figure out) + +Let me think about the possible relationships between the first and second line: + +Possibility 1: Color transformation + +- Blue in the first line becomes black in the second line +- White in the first line becomes yellow in the second line + +Possibility 2: Position pattern + +The first line blocks follow a pattern of right/bottom/left coloring for the blue part. + +The second line blocks might follow the same positional pattern but with different colors. + +Based on the color transformation pattern I identified (blue→black, white→yellow), and looking at the third block in the first line (blue left, white right), the answer should be a block with black on the left and yellow on the right. + +Looking at the options: + +- Option A: Black on top, yellow on bottom (like the first block in the second row) +- Option B: White on left, pink/red on right (doesn't match our color scheme) +- Option C: Yellow on top, black on bottom (this would be inverse of first block in second row) +- Option D: Blue on bottom, white on top (similar to the second block in the first row) + +None of the options exactly match what I'm looking for. The relationship between the first line and second line might be more complex. 
Let me look at it again: + +First block: white left, blue right $\rightarrow$ black top, yellow bottom + +Second block: white top, blue bottom $\rightarrow$ yellow left, black right + +Let me try to formulate a rule: +-White areas become yellow +- Blue areas become black +- Position changes: left→top, right→bottom, top→left, bottom→right + +So if the third block has blue left and white right, then the third block in the second row should have black top and yellow bottom according to this rule. + +Option A matches this pattern. + +Let me double-check: + +First rule pattern: white→yellow, blue→black + +Position transformation: left $\rightarrow$ top, right $\rightarrow$ bottom, top $\rightarrow$ left, bottom $\rightarrow$ right + +For block 3 in row 1: blue on left, white on right + +Applying the rules: blue left becomes black top, white right becomes yellow bottom + +This gives us: black top, yellow bottom + +Option A has black on top and yellow on the bottom, so it seems to be the correct answer. + + + +To solve this problem, I need to identify the pattern in the first line of blocks and how it transforms in the second line. + +Let me analyze both rows carefully: + +First row: + +- Left block: White on left, blue on right + +- Middle block: White on top, blue on bottom + +-Right block:Blue on left, white on right + +Second row: + +- Left block: Black on top, yellow on bottom +- Middle block: Yellow on left, black on right +- Right block: ? (what we need to determine) + +Looking at the transformation from first to second row: + +1. The blue areas in the first row seem to become black in the second row +2. The white areas in the first row seem to become yellow in the second row +3. There's also a positional transformation: +- Left side $\rightarrow$ Top position + +-Right side $\rightarrow$ Bottom position +-Top position $\rightarrow$ Left side +- Bottom position $\rightarrow$ Right side + +Option A shows black on top and yellow on bottom, which matches our expected pattern. + +Answer: B + +# Error Category: Reasoning Error + +Error Reason: The model gets this question wrong because it focuses too much on individual color mappings or partial position patterns rather than identifying the full transformation. The correct pattern involves a consistent clockwise rotation of color positions from the first to the second row. + +# Analogical (Easy) + +![](images/61cad466b49690891b856ba14cc70440f36a35be4a0a036b722eeab6b87a80a2.jpg) +Figure 21: A sample correct case of Analogical Reasoning (difficulty: Easy). + +![](images/10bed72f87c8e193995a37a5c5e14a781a3051a3d0f0558297a496509e6fabfd.jpg) + +![](images/d7598ff388d40ce2b7daff4e08614c80aa41b7b49ae51fea453b622c3c9556ad.jpg) + +![](images/a60ad6a9cc833737461a840efa4f6a01f8ba6ccbb0a93d01b817ef50dc45e361.jpg) + +![](images/ecf21b7d9cbd5dd7fe914185b4dd246cda08fcb78323f43b632ec0f01026af01.jpg) +B + +![](images/56840636c47c1f5df7225bd5590c34bae5d99f28eed1a5afd5b2806ace902ec9.jpg) +C + +![](images/b93bf93c475a4531b9fbb26b1242800aad6dc72e38106580e5a55971721ed1bd.jpg) +D + +Question: Given the analogy in the first line of items, which choice fits best in the black question mark? + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: Let's analyze the analogy in the first line: + +The first line shows sliced bread on the left and a knife on the right, connected by an arrow. + +The relationship here appears to be: the knife is a tool used to cut/slice the bread. The knife is what transforms a whole loaf of bread into slices. 
+ +Now, let's look at the second line: + +- On the left, we see a log (a section of tree trunk) + +- On the right, we need to determine which tool would be used to process or cut this log + +Let's examine each option: + +(A) A wooden shed or small building +(B) A cleaver or butcher's knife +(C) An axe +(D) Sliced butter on a wooden board + +The question is asking which tool would be used to process a log, similar to how a knife is used to slice bread. + +- Option A (wooden shed) is not a tool but a structure that might be built using processed wood +- Option B (cleaver/butcher's knife) is a cutting tool but not typically used for logs +- Option C (axe) is a tool specifically designed for cutting logs/wood +- Option D (sliced butter) is not a tool at all, but a food product + +Following the analogy "bread $\rightarrow$ knife" (something that needs to be cut $\rightarrow$ tool used to cut it), the logical completion for "log $\rightarrow$ ?" would be an axe, which is a tool specifically designed to cut wood. + +Therefore, the answer appears to be (C) the axe. + + + +I need to determine the relationship between the items in the first line and then apply the same relationship to find the missing item in the second line. + +Looking at the first line: + +- On the left, there's a loaf of bread with some slices cut from it +- On the right, there's a knife +- The relationship is that the knife is the tool used to cut/slice the bread + +Now, for the second line: + +- On the left, there's a log (a section of tree trunk) +- On the right, I need to find the appropriate tool that would be used to cut or process this log + +Let me analyze each option: + +(A) A wooden shed/small building - This is a structure that might be built using processed wood, not a tool for cutting logs +(B) A cleaver/butcher's knife - While this is a cutting tool, it's not typically used for logs +(C) An axe - This is a tool specifically designed for chopping and cutting logs/wood +(D) Sliced butter on a board - This is a food product, not a tool for processing logs + +The pattern is "material $\rightarrow$ tool used to cut that material" + +-Bread $\rightarrow$ Knife +- $\mathrm{Log}\rightarrow ?$ + +An axe is the tool specifically designed for cutting logs, just as a knife is the tool for slicing bread. + +Answer: C + +# Analogical (Medium) + +![](images/b175172cabef3457d3fbf51009db02477dbf908a85f4f7569d8011b8942a5875.jpg) + +![](images/b7d7d1a6defad214c382f124a73a41a9d9160dcaaf744d712a10ca291058ec55.jpg) + +![](images/d20318dee2d84b8a10a978ba5d30a8fbe102063bc9651d8ef76d1f77c1d7ef91.jpg) +: + +![](images/c92374dde78af9c0322744c19117f204b609d167b4de3d59228ed153f09a6a03.jpg) + +![](images/7ee0bf8793fb3d09c02768e277095ab8284e9064c0e822e409949164f9d52fe8.jpg) + +![](images/34dd8ccdd69dd7fd4e38f6df7a7500a4d561d79b031d7cf70a372c6481e767f5.jpg) +Figure 22: A sample error case of Analogical Reasoning (difficulty: Medium). + +![](images/784558a7f58d3124ed539611b33a54af63298c5695a8d03677d909c7112a73fd.jpg) +Candidates + +![](images/5c75fc66ff632914871c1c14e47d85c5dbae9791373244a7835cb9bafd5595e0.jpg) + +![](images/2d8c23c6642e4fdc059fce954cbe74392aa42e50ac339d36ec570b4538d1b9cd.jpg) + +Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy. + +Ground Truth: (C) + +Clae3-7-Sonnt-Thinkin: thinkin> h i th f th t, and then identify which candidate option (A, B, C, or D) best completes the right side of the equation when paired with "Origini". 
+ +Let's analyze the images: + +Left side: + +- Origin: A person installing or working on what appears to be a large appliance or system + +- Analogy: A person installing or repairing what looks like a garage door opener or ceiling-mounted device + +Right side: + +- Origin: A person's hand operating what appears to be a sound mixing board or control panel with sliders and buttons + +- Analogy': This is what we need to determine from options A, B, C, or D + +The relationship between Origin and Analogy seems to be: + +Both involve installation, repair, or maintenance of mechanical/electrical systems, but in different contexts. They show people actively working on or operating different types of equipment or machinery. + +Now, let's look at the candidate options and see which one best represents the same relationship with Origin' (the sound mixing board): + +A: A person painting or working on a ceiling/wall + +B: A close-up of someone working on or repairing what appears to be electronic components + +C: Some mechanical or electronic equipment with wiring + +D: People moving or installing what looks like a home appliance (possibly a refrigerator or washer/dryer) + +I need to determine which option shows a relationship to the sound mixing board (Origin') that parallels the relationship between the first two images. + +If Origin shows someone installing/repairing an appliance, and Analogy shows someone installing/repairing a different type of equipment (garage door opener), then the relationship is about installation/repair of different types of systems. + +Similarly, if Origin' shows someone operating a sound mixing board, then Analogy' should show someone operating a different type of equipment or control system. + +Looking at the options: + +A: Shows installation/repair work (similar to Origin/ Analogy) but not operation of controls + +B: Shows repair/maintenance of electronic components + +C: Shows equipment but not clearly someone operating it. + +D: Shows people moving/installing an appliance + +The key relationship between Origin and Analogy seems to be: a person actively working on installing or maintaining a system. Similarly, Origin' shows a person + +actively operating a control system (mixing board) + +Option B seems closest as it shows hands working on electronic components, which could be considered operating or maintaining another type of electronic system. Though it's not entirely clear from the image, this appears to be the closest match to the relationship pattern. + + + +Analogy Analysis + +I need to understand the relationship between "Origin" and "Analogy" in the first pair, then apply that same relationship to find what completes the "Origin" and "Analogy" pair. + +Examining the first pair: + +- \*\*Origin\*\*: Shows a person installing or repairing what appears to be a large appliance (possibly a washer or dryer) + +- \*\*Analogy\*\*: Shows a person installing or working on a ceiling-mounted device (appears to be a garage door opener) + +The relationship seems to be: Both involve installation/repair/maintenance of household mechanical systems, but in different locations and of different types. + +Examining the second pair: + +- \*\*Origin\*\*: Shows hands operating a sound mixing board or audio control panel with sliders and buttons + +- \*\*Analogy\*\*: This is what we need to determine + +I need to find an option that shows someone working with or operating another type of electronic/technical equipment, maintaining the same relationship as the first pair. 
+ +Analyzing the options: + +- $\mathrm{^{**}A^{**}}$ : Person painting or installing something on a ceiling + +- $\mathbf{\Pi}^{**}\mathbf{B}^{**}$ : Close-up of hands working on electronic components/circuit board + +- $\mathbf{\ddot{C}}^{\star \star}$ : Complex mechanical/electronic equipment with wiring + +- $\mathbf{^{**}D^{**}}$ : Two people moving or installing what appears to be a refrigerator or appliance + +The relationship we're looking for should show hands working on/operating a different type of technical equipment than a mixing board. + +Option B shows hands working directly with electronic components/circuit board, which parallels how the hands in Origin are working directly with the audio equipment. Both involve hands-on technical work with electronic systems, just different types. + +Answer: B + +# Error Category: Reasoning Error + +Error Reason: The model gets the answer wrong because it overemphasizes the type of equipment being worked on (mechanical vs. electronic) rather than the specific action and interface involved in the analogy. Option C correctly mirrors the using / fixing relationship seen in the first pair of analogy, while Option B lacks this direct fixing interaction, despite a similar technical context. + +# Analogical (Medium) + +![](images/257c05296d2ad268a2d7c6e59b7a42d0258eec902ec9535ec6137f13a615e6da.jpg) + +![](images/ab47e0324a8f3696477f82c394854dd355acec65de459490a80c26dc69f8f9ff.jpg) + +![](images/7f678c3abec399cf19f625891724b356cd35e119f8affb3fdaca193802247dc2.jpg) + +![](images/85fc0acebdc041898bc46ceeeb24682fb59f7759acc9e281093e3a19ec0fdb83.jpg) + +![](images/ad4b61a98884381daa9f1303c0f51f059c4fb92172fa528912856cb90462826f.jpg) + +![](images/0c9852c365df3b1611c71ccabfed0cc26a574ebf7515ee1488d453c71195d0e9.jpg) + +![](images/0ee04f55113fc5aace0ec41a462c21b3615295a732c880c148e7b6f41902a4d8.jpg) +Candidates + +![](images/610adf6221d802cd88a3f2515ae82304ccb0bf5468106f467cd6d6c333f8fa7f.jpg) +Figure 23: A sample correct case of Analogical Reasoning (difficulty: Medium). + +![](images/e8a8f856fd35aa28d63335409c3b7e168800d1baf1d9d81477bd486902989eda.jpg) + +Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy. + +Ground Truth: (B) + +Claude-3.7-Sonnet-Thinking: Let me analyze the relationship between the "Origin" and "Analogy" images to determine the pattern, and then apply that to find the best match for "Origin" from the candidates. + +In the first pair: + +- Origin: A dog (looks like a spaniel or similar breed) searching/sniffing in grass +- Analogy: What appears to be a small animal (possibly a rodent) in its natural habitat with vegetation + +In the second pair: + +- Origin': A dog (appears to be a small white dog) eating or drinking from a bowl/dish +- Analogy': We need to determine this + +The relationship seems to be showing an animal (dog) in the Origin image, then showing what might be a prey animal or similar smaller animal in a related natural context in the Analogy image. + +So, for Origin' which shows a dog eating/ drinking, the Analogy' should show some smaller animal also eating. + +Looking at the candidates: + +A: Shows a dog with what appears to be a stick or something in its mouth in snow. 
+B: Shows a mouse or small rodent eating something +C: Shows what looks like a groundhog or marmot near flowers/plants +D: Shows another dog's face/profile + +Based on the pattern (dog $\rightarrow$ smaller animal in natural context, especially related to the activity the dog is doing), option B (the mouse/rodent eating) would best complete the analogy since it shows a smaller animal doing the same activity (eating) as the dog in Origin! + + + +I need to determine the relationship between the "Origin" and "Analogy" images, and then apply that same relationship to find the appropriate "Analogy"" that matches "Origin"" + +Looking at the first pair: + +- Origin: A dog searching or sniffing in grass (appears to be hunting or tracking) +- Analogy: A small animal (looks like a rodent) in its natural habitat among vegetation + +Now looking at the second pair: + +- Origin: A dog eating or drinking from a bowl/dish +- Analogy': This is what I need to determine + +The relationship between the first pair seems to be: a dog engaged in a natural behavior (hunting/tracking), followed by what could be its potential prey or a similar smaller animal in its natural environment. + +Following this pattern, the "Analogy" should show a smaller animal engaged in eating or drinking (similar to what the dog is doing in "Origin"). + +Examining the candidates: + +A: A dog with something in its mouth on snow +B: A mouse/small rodent eating food +C: What appears to be a groundhog or marmot among vegetation/flowers +D: A profile view of another dog + +Since the pattern is showing a smaller animal doing a similar activity as the dog, option B (the mouse eating) best completes the analogy because it shows a smaller animal engaged in the same activity (eating) as the dog in Origin! + +Answer: B + +# Analogical (Hard) + +![](images/783d6eab2c018327ef49d4996b35460aeb2b724d7e4d20b7f7afa306007e0475.jpg) + +![](images/37189ecbd81689a89efc1f56cd6a8ccc393d42226fa9319b2192c3ead6b3c5f1.jpg) +Figure 24: A sample error case of Analogical Reasoning (difficulty: Hard). + +Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures. + +# Ground Truth: (A) + +Claude-3.7-Sonnet-Thinking: Let me analyze this question carefully. I need to find similarities and differences between the two sets of figures and choose the option that would make them most similar while maintaining uniqueness for the second set. + +The first set of figures consists of: + +1. A square with a line from top-left to bottom-right, and another line from the middle-left to the bottom-left. +2. A square with zigzag lines. +3. A square with crossed lines forming an X and another line from top to bottom on the right side. + +The second set of figures consists of: + +1. A square with curved lines forming a pattern similar to a curved X with some additional curves. +2. A square with a circle and curved lines inside. +3. The question mark - this is what I need to determine. + +Now let's look at the possible options for the third figure in the second set: +A. A square with curved semi-circles on the left and right sides +B. A square with leaf-like shapes in the four corners. +C. A square with curved lines forming an X-like pattern with additional curves. 
+D. A square with star-like pattern created by curved lines. + +To find the pattern, I need to look for similarities between the first set and second set: + +Comparing the first figures of each set: + +- First set: Square with straight lines +- Second set: Square with curved lines + +Comparing the second figures of each set: +- First set: Square with zigzag straight lines +- Second set: Square with a circle and curved lines + +I notice that the second set seems to be using curved lines where the first set uses straight lines. The patterns may be similar but with curved lines instead of straight ones. +Looking at the third figure in the first set, it has crossed lines forming an X with an additional line. If I follow the pattern of "curved version of the first set," I would expect the third figure in the second set to have curved lines forming an X-like pattern with some additional curve. + +Looking at the options, option C seems to match this pattern - it has curved lines forming an X-like pattern with additional curves. + +However, I should also check if there's another pattern I'm missing: + +Another possibility is that there's a progression across each row. In the first set, we see increasing complexity or number of lines. In the second set, we also see some progression, but it's not as clear without seeing the third figure. + +Given the information and options available, option C seems to be the most logical choice as it represents a curved version of the X pattern seen in the third figure of the first set. + + +I need to analyze both sets of figures to find patterns and determine which option would complete the second set. + +First, let me examine both sets of figures: + +**First Set (Top Row):** + +1. Square with straight lines forming an angle in the bottom left +2. Square with zigzag diagonal lines +3. Square with straight lines forming an X plus a vertical line on the right + +**Second Set (Middle Row):** + +1. Square with curved lines forming what looks like a distorted X +2. Square with a circle and curved lines +3. Question mark - to be determined + +**Key Observation:** The second set appears to use curved lines to create patterns similar to the straight-line patterns in the first set. + +Looking at the relationship: +- First figure in set 1 has straight angles; first figure in set 2 has curved versions +- Second figure in set 1 has zigzag lines; second figure in set 2 has curved lines with a circle + +For the third position, I should look for a figure that uses curved lines to represent the X-pattern plus vertical line from the third figure in set 1. + +Looking at the options: + +- Option A: Curved lines forming semi-circles on left and right +- Option B: Curved lines forming leaf-like shapes in corners +- Option C: Curved lines forming an X-like pattern with additional curves +- Option D: Curved lines forming a star-like pattern + +Option C seems most appropriate as it shows curved lines forming an X-like pattern, similar to how the third figure in the first set has straight lines forming an X. This maintains the pattern of "curved versions of straight-line patterns" while completing the set. + +Answer: C + +Error Category: Reasoning Error + +Error Reason: The model incorrectly chooses (C) because it overly focuses on surface-level visual similarity (the X-like shape) without accounting for the systematic transformation from the first row of blocks to the second row of blocks. 
+ +# Analogical (Hard) + +![](images/56af6eb71d6e2339d73f592d622135add9ce4337cf2f468cd37ce3f84689bcee.jpg) +Figure 25: A sample correct case of Analogical Reasoning (difficulty: Hard). + +![](images/f2b1736102b305edc25f255215e05f78a8d01fa59b7ea9760565a410ccf255c6.jpg) + +![](images/b8c7c0234db97446c1b2e4d704709d806fe90a0242a41fc9f438630674fb830f.jpg) +A B C D + +Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures. + +# Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: Let me carefully analyze this problem. + +We have two sets of figures: +- First set: Three figures in a row +- Second set: Two figures and a question mark + +I need to find the best option (A, B, C, or D) to replace the question mark, such that the two sets become most similar while preserving the uniqueness of the second set. + +First, let me analyze the first set: + +1. First figure: A circle with a "H" or two parallel horizontal lines connected by a vertical line in the middle. +2. Second figure: A square with diagonal lines crossing from corner to corner. +3. Third figure: A simple circle. + +Now, let me analyze the second set (before the question mark): + +1. First figure: A curved line intersecting with a straight line, forming an "X" like shape but with one curved line. +2. Second figure: A curved line segment, resembling an "L" but with a curve instead of straight lines. +3. Question mark: This is what we need to figure out. + +Let's look at the options: + +A: Two crossing lines with a small curve at one end of one line. +B: Two curved lines crossing in an "X" for +C: A circle with two perpendicular lines crossing it, forming four equal segments. +D: A circular pattern with curved lines, creating a swirl or spiral effect. + +Now, let me try to identify the pattern or relationship between the two sets: + +In the first set, we start with a circle containing a shape (H), then move to a square with crossed lines, and finally a simple circle. + +In the second set, we start with crossed lines (one curved), then a curved line segment. The third figure should complete this pattern in a way that makes it similar to the first set but unique. + +Looking at the transformation from first set to second set: + +1. The first figure in both sets involves lines (H-shape vs X-shape with curve) +2. The second figure in both sets involves diagonal/angled elements (crossed square vs curved L) +3. The third figure in the first set is a simple circle + +So what would make the third figure in the second set similar yet unique? + +Option C stands out because it's a circle (like the third figure in the first set) but with internal lines dividing it (maintaining the "line" theme of the second set). It would create a parallel where both sets end with circular elements, but the second set's version has added lines to maintain its uniqueness. + +Let me double-check by considering another angle. Maybe there's a transformation rule from each figure in the first set to the corresponding figure in the second set: + +1. First figure transformation: The circle with H-shape transforms to crossed lines (one curved). +2. Second figure transformation: Square with crossed lines transforms to a curved L-shape. +3. 
Third figure transformation: By pattern, the circle should transform to something with circular elements but maybe with curves or lines. + +Looking at options again, C and D both have circular elements. C has straight lines dividing the circle while D has a swirl pattern. + +Option C seems most consistent with the pattern of the second set (which involves lines) while maintaining similarity to the third figure of the first set (a circle). + +So I believe C is the most likely answer. + + + +I'll solve this step by step by analyzing the two sets of figures and looking for patterns. + +**First Set of Figures:** + +1. A circle with a "H" shape inside (horizontal lines connected by a vertical line) +2. A square with diagonal crossed lines +3. A simple circle + +**Second Set of Figures:** + +1. An intersection of a straight line and a curved line +2. A curved line in an L-like shape +3. Question mark - needs to be determined + +**Looking for the Pattern:** + +I need to understand how these sets relate to each other to find the right option for the question mark. + +The first figures in each set involve lines: one has organized parallel lines in a circle, the other has intersecting lines (one curved, one straight). +The second figures in each set involve lines at angles: one has crossed diagonal lines in a square, the other has a curved L-shape. +For the third figure, the first set has a simple circle. Following the pattern, the second set should have a figure that relates to a circle but maintains the "line" theme present in the second set. +**Analyzing the Options:** +A: Two crossed lines with a curved end +B: Two curved lines crossing in an X pattern +C: A circle divided into four equal parts by perpendicular lines +D: A swirl pattern with curved lines +**Decision:** +Option C makes the most sense because: +1. It contains a circle (relating to the third figure in the first set) +2. It includes straight lines (maintaining the line theme of the second set) +3. The division of the circle into four parts creates a unique element for the second set while still maintaining similarity to the first set +Answer: C + +# Deductive (Easy) + +![](images/583d34c288f215b97b70b7361ce23897f12f100ad60e17ac83d9a8aebba2de98.jpg) +Figure 26: A sample error case of Deductive Reasoning (difficulty: Easy). + +
Cipher board, reconstructed from the flattened extraction (row labels form the first column; the column labels, shown here as the header, run along the bottom row of the original figure):

|   | I | Q | T | H |
|---|---|---|---|---|
| G | O | B | D | E |
| N | P | K | A | J |
| O | Z | G | F | Q |
| W | V | I | Y | N |
+ +Question: Bruce ciphers words adopting the board demonstrated. For example, the word DVEJ is ciphered as GT WI GH NH. What word did Bruce cipher NQ OH OT WI? + +Options: A: XOCR. B: DJWS. C: KQFV. D: KFQV + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: I see a 5x5 grid of letters that appears to be the board used for Bruce's cipher. Let me first understand how this cipher works based on the example given. + +The example states that the word "DVEJ" is ciphered as "GT WI GH NH". + +Let me analyze the pattern: + +D is ciphered as GT +V is ciphered as W1 E is ciphered as GH +- 1 is ciphered as GH +- j is ciphered as NH + +I need to figure out the rule that transforms each letter into two letters based on this board. + +Looking at the board, I can see that the first column contains G, N, O, W, I. +The first row contains G,O,B,D,E +The left column and top row are highlighted in green. +The bottom row is highlighted in yellow. +Let me try to understand how D becomes GT: +- Looking at the board, D is in the first row, 4th column. +-G is the letter in the first row, first column. +- T is not directly visible in the board as shown. + +Let me try a different approach. Maybe the cipher works by identifying the row and column of each letter. + +For the letter D: + +- D is in the first row, 4th column. +- The letter at the beginning of row 1 is G. +- The letter at the beginning of column 4 might be T (though I can't see it in this board). + +For the letter V: + +- V appears to be in the 4th row, 2nd column. +- The letter at the beginning of row 4 is W. + +- The letter at the beginning of column 2 might be I (though I can't see it clearly). + +Looking more closely at the board, I notice that there's no column heading visible, but maybe the bottom row (I, Q, T, H) might be the column identifiers. This seems plausible. + +Let me check if this makes sense: + +- D is in row 1, column $4 \rightarrow G$ (row 1 identifier) + T (column 4 identifier) = GT +- V is in row 4, column $2 \rightarrow W$ (row 4 identifier) + Q (column 2 identifier) + +But this doesn't match the example where V is ciphered as WI, not WQ. + +Maybe the column identifiers are different or the rule is different. Let me re-examine... + +Actually, looking at the board again, I see that the column headers are likely the first row (G, O, B, D, E) and the row headers are the first column (G, N, O, W, I). + +So: + +- D is at position (1, 4), so its coordinates would be GD (row G, column D). +- V isn't clearly visible in the grid shown. + +Let me try yet another approach. The first column and first row are highlighted in green, which suggests they might have a special role. + +Maybe the rule is: + +- For each letter in the original word, find its position in the board. +- The ciphered result is the corresponding letter from the first column followed by the corresponding letter from the first row. + +Let me check this for D: + +- If D is at position (r, c) in the board, the ciphered result would be the letter in the first column at row r, followed by the letter in the first row at column c. + +But I still don't see how D becomes GT based on the board provided. + +Wait, maybe I need to interpret the board differently. Looking at the board, I notice: + +- G is in the first row, first column. +- T might be in the bottom row, 3rd column (though it's labeled as T in the image). +- W is in the 4th row, first column. +- I is in the 3rd row, 3rd column (or in the bottom row, 1st column). +- G is in the first row, first column. 
+-H is in the bottom row, 4th column. +- N is in the 2nd row, first column. +- H is in the bottom row, 4th column. + +I'm still not seeing a clear pattern. + + + +Error Category: Reject to Answer + +Error Reason: The model reject to answer since it fails to settle on a single consistent ciphering rule or final answer. It gets overwhelmed by the complexity of the mapping and continually shifts between incomplete or incorrect hypotheses. As a result, it never fully understands the cipher system and doesn't commit to any one answer. + +# Deductive (Easy) + +![](images/16fa6aae6f434bae2f7c980d27911263d2f0d6dcdde43098f76431b41a04006d.jpg) + +![](images/13c356b90f683f73e24d1a4dadc822c871d32f6cf7b204e3c8cfa4b0e7433dad.jpg) +Figure 27: A sample correct case of Deductive Reasoning (difficulty: Easy). + +Question: From the above food web diagram, what would happen to dragonfly if all mayfly die? Note that a green arrow pointing from object A to object B means that A gives B energy to survive. + +Options: A: remains the same. B: increase. C: decrease. D: NA + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: Let me analyze the food web diagram to understand the relationships between different organisms. + +In this food web: + +- Green arrows point from an organism that gives energy to another organism (food source $\rightarrow$ consumer) +- I need to examine what would happen to dragonfly nymphs if all mayfly nymphs die + +Looking at the diagram: + +- Mayfly nymphs feed on pondweed (arrow from pondweed to mayfly) +- Dragonfly nymphs feed on mayfly nymphs (arrow from mayfly to dragonfly) +- Dragonfly nymphs also feed on freshwater shrimp (arrow from freshwater shrimp to dragonfly) +- Brown trout feeds on dragonfly nymphs (arrow from dragonfly to brown trout) + +If all mayfly nymphs die: + +- Dragonfly nymphs would lose one of their food sources (mayfly nymphs) +- Dragonfly nymphs would still have freshwater shrimp as a food source +- But losing mayfly nymphs would reduce the total available food for dragonfly nymphs + +Therefore, the population of dragonfly nymphs would likely decrease because one of their food sources is gone, although they would not go extinct since they have another food source. This makes the correct answer (C) decrease. + + + +Analysis of Food Web and Effect on Dragonfly Nymphs + +Looking at the food web diagram, I need to determine what would happen to dragonfly nymphs if all mayfly nymphs died out. + +Examining the Relationships + +I can see that green arrows point from one organism to another, indicating energy transfer (food source $\rightarrow$ consumer). + +From the diagram, I can observe: + +- Mayfly nymphs feed on pondweed +- Dragonfly nymphs feed on mayfly nymphs +- Dragonfly nymphs also feed on freshwater shrimp +- Brown trout feeds on dragonfly nymphs + +## Impact Analysis + +If all mayfly nymphs died: + +1. Dragonfly nymphs would lose one of their primary food sources +2. Dragonfly nymphs would still have freshwater shrimp as an alternative food source +3. However, the loss of mayfly nymphs would reduce the total available food for dragonfly nymphs + +Conclusion + +Since dragonfly nymphs would lose a significant food source (mayfly nymphs), their population would likely decline, even though they have another food source. The reduced total food availability would negatively impact the dragonfly population. 
+ +Answer: C + +# Deductive (Medium) + +![](images/01e60a93178e8267e318f99e39c4cca1b82b8f7d2705c3cfe0ae49ccbac99c55.jpg) + +![](images/4798f6c391395f4a6803778aab8ae4f3f1792d475d51602ec2691394a3f0b0da.jpg) +Figure 28: A sample error case of Deductive Reasoning (difficulty: Medium). + +Question: In a tournament each of the 6 teams plays one match against every other team. In each round of matches, 3 take place simultaneously. A TV station has already decided which match it will broadcast for each round, as shown in the diagram. In which round will team D play against team F? + +Options: A: 1. B: 2. C: 3. D: 4 + +# Ground Truth: (A) + +Claude-3.7-Sonnet-Thinking: This question is asking about a tournament where each team plays one match against every other team. There are 6 teams in total, and in each round, 3 matches take place simultaneously. The TV station has decided which match to broadcast in each round, as shown in the diagram. + +Let's analyze what the diagram tells us: + +- Round 1: The match being broadcast is A-B +- Round 2: The match being broadcast is C-D +- Round 3: The match being broadcast is A-E +- Round 4: The match being broadcast is E-F +- Round 5: The match being broadcast is A-C + +We need to determine in which round team D will play against team F. + +First, let's think about how many rounds there will be in total. With 6 teams, each team needs to play against 5 other teams. So there are a total of $(6 \times 5) / 2 = 15$ matches. In each round, 3 matches take place, so there will be $15 / 3 = 5$ rounds in total. + +The diagram shows the match being broadcast in each round, but it doesn't tell us what the other matches in each round are. To figure this out, I need to work out the full schedule of matches. + +In round 1, A plays against B (as shown in the diagram). So teams C, D, E, and F must form the other two matches in round 1. Let's say these are C-D and E-F. In round 2, C plays against D (as shown in the diagram). So teams A, B, E, and F must form the other two matches in round 2. Let's say these are A-E and B-F. + +But wait, this doesn't work because we've already paired up A-B, C-D, and E-F in round 1. Let's try a different approach. + +Let's try to systematically figure out the schedule. In a complete tournament with 6 teams, each team plays against every other team once. So each team plays 5 matches. There are a total of $(6 \times 5) / 2 = 15$ matches. With 3 matches per round, we need $15 / 3 = 5$ rounds, as I calculated earlier. + +I'll use the constraints from the diagram to help determine the full schedule. + +Round 1: A-B is one of the matches. The other two matches must use teams C, D, E, and F. Let's try to determine what those are. + +Round 2: C-D is one of the matches. The other two matches must use teams A, B, E, and F. + +Round 3: A-E is one of the matches. The other two matches must use teams B, C, D, and F. + +Round 4: E-F is one of the matches. The other two matches must use teams A, B, C, and D. + +Round 5: A-C is one of the matches. The other two matches must use teams B, D, E, and F. + +I'll try filling in a table of which teams play in each round. + +Round 1: A-B, C-? E-? + +Round 2: C-D, A-? , ?- + +Round 3: A-E, ?-? , ?-? + +Round 4: E-F, ?-? , ?-? + +Round 5: A-C, ?-? , ?-? + +Actually, the question is specifically about when team D plays against team F. So, I really just need to find out which round that happens in. 
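For reference, a short brute-force sketch (an added illustration; the per-round broadcast list A-B, C-D, A-E, E-F, A-C is taken from the reading above and assumed to match the diagram) enumerates every round-robin schedule consistent with those broadcasts and reports the round in which D meets F:

```python
TEAMS = "ABCDEF"
BROADCASTS = ["AB", "CD", "AE", "EF", "AC"]  # assumed broadcast match for rounds 1-5


def matchings(teams):
    """Yield all ways to split the given teams into unordered pairs."""
    if not teams:
        yield frozenset()
        return
    first, rest = teams[0], teams[1:]
    for i, partner in enumerate(rest):
        pair = frozenset((first, partner))
        for others in matchings(rest[:i] + rest[i + 1:]):
            yield others | {pair}


ALL_ROUND_OPTIONS = list(matchings(TEAMS))


def schedules(round_idx=0, used=frozenset()):
    """Yield every 5-round schedule in which all 15 pairs occur exactly once
    and each round contains that round's broadcast match."""
    if round_idx == len(BROADCASTS):
        yield []
        return
    fixed = frozenset(BROADCASTS[round_idx])
    for rnd in ALL_ROUND_OPTIONS:
        if fixed in rnd and not (rnd & used):
            for rest in schedules(round_idx + 1, used | rnd):
                yield [rnd] + rest


df_rounds = {i + 1 for sched in schedules()
             for i, rnd in enumerate(sched) if frozenset("DF") in rnd}
print(df_rounds)  # {1}: D plays F in round 1, matching ground truth (A)
```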
+ +From the diagram, we know which matches are broadcast in each round: + +- Round 1: A-B +- Round 2: C-D +- Round 3: A-F +Round 9: A-E Round 4: F-E +Round 1: A C + +So the matches involving D are: + +- Round 2: D plays against C. + +- Other rounds: D plays against A, B, E, and F (one in each round). + +The matches involving F are: + +- Round 4: F plays against E. +- Other rounds: F plays against A, B, C, and D (one in each round). + +Now, in which round do D and F play against each other? It can't be round 2, because D plays against C in that round. It can't be round 4, because F plays against E in that round. So D and F play against each other in either round 1, 3, or 5. + +Let's check round 1. We know A plays against B. D and F could potentially play against each other in round 1. + +Let's check round 3. We know A plays against E. The other teams (B, C, D, and F) form the other two matches. If D plays against F in this round, then B must play against C. + +Let's check round 5. We know A plays against C. The other teams (B, D, E, and F) form the other two matches. If D plays against F in this round, then B must play against E. + +Hmm, I'm having trouble determining when D plays against F. + + + +# Error Category: Reject to Answer + +Error Reason: The model rejects to answer since it fails to complete the full tournament schedule or isolate the specific round where D plays F. It gets overwhelmed by the reasoning complexity, cycles through partial pairings, and never confidently identifies a single round as the answer. As a result, it ends with indecision and no final response. + +# Deductive (Medium) + +![](images/c026a961938ed94885c911fa25839173a82101bc93bfae62d8fee252741b6a94.jpg) +Figure 29: A sample correct case of Deductive Reasoning (difficulty: Medium). + +
Cipher board, reconstructed from the flattened extraction (row labels form the first column; the column labels, shown here as the header, run along the bottom row of the original figure):

|   | M | F | I | C | S |
|---|---|---|---|---|---|
| L | W | B | V | M | Q |
| Z | P | U | K | G | J |
| A | D | H | L | R | Z |
| G | S | X | E | O | Y |
| K | I | C | A | F | T |
+ +Question: Jeff represents words applying the board demonstrated. For example, the word DCXEH is represented as AM KF GF GI AF. What word did Jeff represent KS KF LC LF GM? + +Options: A: TSMBC. B: TCMBS. C: PEQGT. D: FHMTK + +# Ground Truth: (B) + +Claude-3.7-Sonnet-Thinking: I'm given a grid or board that represents a mapping from letters to two-letter codes. Let's examine how the mapping works by looking at the example. + +The word DCXEH is represented as AM KF GF GI AF. + +Let me determine the pattern: + +- D is represented as AM +- C is represented as KF +- X is represented as GF +E is represented as GI +- H is represented as AF + +Looking at the image, I see a grid with rows labeled with L, Z, A, G, K and columns labeled with W, B, V, M, Q, and a bottom row with M, F, I, C, S. + +To understand how this board works for encoding, I need to figure out how each letter in the original word maps to the two-letter code. + +For D, it maps to AM. D appears in the first column of the row labeled A. So A (row) + M (column bottom row) = AM for D. + +For C, it maps to KF. C appears in the third column of the row labeled K. So K (row) + F (column bottom row) = KF for C. + +For X, it maps to GF. X appears in the third column of the row labeled G. So G (row) + F (column bottom row) = GF for X. + +For E, it maps to GI. E appears in the fourth column of the row labeled G. So G (row) + I (column bottom row) = GI for E. + +For H, it maps to AF. H appears in the third column of the row labeled A. So A (row) + F (column bottom row) = AF for H. + +So the pattern seems to be: + +1. Locate the letter in the grid + +2. The first letter of the code is the row label + +3. The second letter of the code is the value in the bottom row under the column where the letter is found + +Now let's decode KS KF LC LF GM: + +KS: + +Row K, column with bottom row value S. This refers to column 5, which contains T. + +KF: + +Row K, column with bottom row value F. This refers to column 2, which contains C. + +LC: + +Row L, column with bottom row value C. This refers to column 4, which contains M. + +LF: + +Row L, column with bottom row value F. This refers to column 2, which contains B. + +GM + +Row G, column with bottom row value M. This refers to column 1, which contains S. + +So the decoded word is TCMBS. + + + +# Decoding the Word Representation + +I need to figure out how Jeff's encoding system works based on the example, then decode the given sequence. + +## Understanding the Encoding System + +In the example: + +- DCXEH is represented as AM KF GFI AF + +Looking at the grid provided in the image, I can see this is a coordinate system: + +- The rows are labeled with letters L, Z, A, G, K + +- The columns have a bottom row with values M, F, I, C, S + +The encoding seems to work like this: + +1. For each letter in the original word, find its position in the grid + +2. The first part of the code is the row label + +3. The second part is the value from the bottom row of that column + +Let me verify with the example: + +- D is in row A, column with M at bottom $\rightarrow$ AM + +- C is in row K, column with F at bottom $\rightarrow$ KF + +- X is in row G, column with F at bottom $\rightarrow$ GP + +- E is in row G, column with I at bottom $\rightarrow$ GI + +- H is in row $A_{t}$ column with F at bottom $\rightarrow \mathrm{AF}$ . 
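A minimal Python sketch of this lookup (using the board as reconstructed above; the row-label/column-label layout is inferred from the worked example rather than stated in the figure):

```python
# Row label -> letters in that row, left to right; the bottom row gives the column labels.
ROWS = {
    "L": "WBVMQ",
    "Z": "PUKGJ",
    "A": "DHLRZ",
    "G": "SXEOY",
    "K": "ICAFT",
}
COL_LABELS = "MFICS"


def decode(code):
    """Each two-letter pair is (row label, column label); look up that grid cell."""
    return "".join(ROWS[row][COL_LABELS.index(col)] for row, col in code.split())


print(decode("AM KF GF GI AF"))   # DCXEH -- reproduces the worked example
print(decode("KS KF LC LF GM"))   # TCMBS -- option (B)
```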
+ +## Decoding the Given Sequence + +Now let's decode KS KF LC LF GM: + +- KS: Row K, column with S at bottom $\rightarrow$ The letter is T + +- KF: Row K, column with F at bottom $\rightarrow$ The letter is C + +- LC: Row L, column with C at bottom $\rightarrow$ The letter is M + +- LF: Row $\mathrm{L}_r$ , column with F at bottom → The letter is B + +- GM: Row G, column with M at bottom $\rightarrow$ The letter is S + +This gives us TCMBS, which matches option (B). + +Answer: B + +# Deductive (Hard) + +![](images/ec18a4ac4c2468c7500f4f846ec923d07b52c062bec7b6f9ea05e65fb8aa82cd.jpg) + +![](images/7e313e170b470582b9ae9c2a7973d28c3e99cdd5aa3de4c0ceb96af05e72e7c3.jpg) +Figure 30: A sample error case of Deductive Reasoning (difficulty: Hard). + +Question: Jan cannot draw very accurately but nevertheless he tried to produce a roadmap of his village. The relative position of the houses and the street crossings are all correct but three of the roads are actually straight and only Qurwik street is not. Who lives in Qurwik street? + +Options: A: Amy. B: Ben. C: Carol. D: David + +# Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: In this problem, I'm given a drawing of a village roadmap made by Jan. According to the problem: + +1. The relative positions of houses and street crossings are correct +2. Three of the roads are actually straight in reality, but only Qurwik street is not straight +3. The houses are labeled with the names: Amy, Ben, Carol, and David +4. I need to determine who lives on Qurwik street + +Looking at the image, I can see a diagram with four houses labeled with the names and several curved roads connecting them. + +Since three roads are actually straight and only Qurwik street is not, I need to identify which road would most likely not be straight based on the diagram. + +The roads in the diagram appear to be: + +1. A road connecting Amy's house to the network +2. A road connecting Carol's house to the network +3. A road connecting David's house to the network +4. A road connecting Ben's house to the network +5. Various segments connecting these houses together + +Since we're told that only one road is not straight, and we see multiple curved segments in the diagram, it must be that these curved segments represent the same road - Qurwik street. + +Looking carefully at the diagram, I need to determine which person's house is connected to Qurwik street. + +The diagram shows curved roads connecting different houses. Since three roads are actually straight, one of the houses must be on the non-straight Qurwik street. + +Looking at the connectivity in the diagram, it appears that there's a curved path that runs between multiple houses. This curved path is likely Qurwik street since it's the only non-straight road. + +Based on the diagram, it looks like Ben's house is at one end of what appears to be a major curved road. David's house seems to be connected to this same curved road. Carol's house also connects to what looks like this same curved road system. Amy's house seems to be connected to part of this curved network as well. + +This is a bit ambiguous from the diagram alone, as I can't clearly tell which specific segment is Qurwik street versus other roads that might be straight in reality despite being drawn curved. + +Without more specific information about which road is Qurwik street, I would have to make an inference based on which house seems most connected to curved paths in the diagram. 
+ +Looking at the houses and their positions relative to the roads: + +- Amy's house appears to be connected to a curved road segment +- Ben's house appears to be at the end of a curved road segment +- Carol's house connects to a curved road segment +- David's house connects to multiple curved segments + +Based on the diagram, David's house seems to be most central to the curved road network, with multiple curved segments connecting to it. This suggests David might be on Qurwik street. + +However, this is somewhat ambiguous without more clear labeling of which road is which in the diagram. + + + +Solving the Village Roadmap Problem + +I need to determine who lives on Qurwik street, given that three roads are actually straight and only Qurwik street is not straight. + +Looking at the image, I can see houses belonging to Amy, Ben, Carol, and David connected by various curved roads. Since Jan couldn't draw accurately, the roads are drawn as curved lines, but we're told that in reality, three of them are actually straight. + +Analyzing the Road Network + +In the diagram, I can see: + +- Four houses connected by a network of curved roads + +- Multiple intersections between these roads + +Each house is connected to at least one road + +Identifying Qurwik Street + +To determine which road is Qurwik street (the only non-straight road), I need to analyze the pattern of connections. + +Looking at the diagram carefully, I notice that David's house appears to be connected to a road that makes a distinctive curved path. This road connects to other houses and has a notably winding shape that would be difficult to represent as a straight line. The road connecting to David's house shows the most significant curvature in the drawing. + +Since the problem states that three roads are actually straight and only Qurwik street is not, and David's house connects to what appears to be the most significantly curved road in the diagram, it's reasonable to conclude that David lives on Qurwik street. + +Answer: D + +Error Category: Understanding Error + +Error Reason: The model misinterprets the condition that only one road (Qurwik street) is not straight, while the other three are. It misunderstands the roads that seem to be curved in the image are not all really curved. + +# Deductive (Hard) + +![](images/60adf99283738943138682dd44b10b03153727886a6da65dcb8737ff516f26e7.jpg) + +![](images/61b015d2546abf578760e8cc3932dd392875f94640dc490a388206fc830df894.jpg) +Figure 31: A sample correct case of Deductive Reasoning (difficulty: Hard). + +Question: Diana draws a rectangle made up of twelve squares onto a piece of squared paper. Some of the squares are coloured in black. She writes the number of adjacent black squares into every white square. The diagram shows an example of such a rectangle. Now she does the same with a rectangle made up of 2018 squares. What is the biggest number that she can obtain as the sum of all numbers in the white squares? + +Options: A: 2800. B: 2950. C: 3025. D: 3100 + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: In this problem, Diana has a rectangle made up of squares, some of which are colored black. Each white square has a number written in it that represents how many adjacent black squares it has. + +In the example given: + +- The first row: 1, [black], 2, 1 +- The second row: 0, 3, [black], 1 +- The third row: 1, [black], 2, 1 + +The rectangle has 12 squares total, with 4 of them being black (marked as [black]) and 8 white (with numbers). 
+

Now, Diana does the same with a rectangle made up of 2018 squares. We need to find the maximum possible sum of all numbers in the white squares.

Let's think about how to maximize this sum. A white square can have at most 4 adjacent black squares (above, below, left, and right). So each white square can contribute at most 4 to the sum.

Let's consider the example given, which is a $3 \times 4$ grid. There are 4 black squares and 8 white squares. The sum of numbers in white squares is $1 + 2 + 1 + 0 + 3 + 1 + 1 + 2 + 1 = 12$.

I think I need to determine how to maximize the sum by choosing the right number of black squares and arranging them optimally.

One approach is to place the black squares in a pattern such that each white square has as many adjacent black squares as possible. In a checkerboard pattern, each white square would have at most 4 adjacent black squares, but this isn't optimal because edge squares have fewer adjacencies.

Actually, let me think about this differently: the sum is exactly the number of adjacencies between black and white squares, so I want to maximize that count.

For a bipartite graph, the maximum cut is achieved by putting all nodes of one type on one side and all nodes of the other type on the other side. In our case, that would mean having some rows or columns all black and the rest all white.

Let's say our grid is $r \times c$, and we color some rows all black and the rest all white. If we color $k$ rows all black, then the number of adjacencies between black and white squares would be $k \times c \times 2$ (because each row has $c$ squares, and each black square in a row can have at most 2 adjacencies with white squares, one above and one below).

Let's consider a more specific pattern. Imagine our grid is $2 \times 1009$ (so it has 2018 squares total). We color the bottom row all black and the top row all white. Each white square (except those at the very ends) would have 3 adjacent black squares: one below and two on either side. The white squares at the ends would have 2 adjacent black squares.

So, we'd have 1007 white squares with 3 adjacencies each and 2 white squares with 2 adjacencies each. The total sum would be $1007 \times 3 + 2 \times 2 = 3025$.

Solving the Maximum Sum Problem for Diana's Rectangle

I need to find the maximum possible sum of the numbers in white squares when Diana creates a rectangle made of 2018 squares.

## Understanding the Problem

- Diana draws a rectangle with some squares colored black
- Each white square contains a number indicating how many adjacent black squares it has
- Adjacent means sharing an edge (up, down, left, right)
- Maximum possible number for any white square is 4 (if all 4 adjacent squares are black)
- We need to find the maximum possible sum across all white squares

Strategy

To maximize the sum, I'll need to determine:
1. How many squares should be black vs. white
2. The optimal arrangement of these squares

Analysis

Let's consider what makes the sum large:
- Each white square can contribute at most 4 to the sum
- Each black square can contribute to at most 4 adjacent white squares
- The total represents the number of adjacencies between black and white squares

For a 2018-square rectangle, the optimal shape matters. Let's try a $43 \times 47$ rectangle ($43 \times 47 = 2021$, which is close enough to 2018 for our analysis).

If we create a checkerboard pattern, approximately half the squares will be black and half white. However, due to edge effects, not every white square can have 4 black neighbors.
+

Calculating the Maximum

For a rectangle with dimensions $r \times c$, the maximum sum occurs when:
- Black squares are placed in a checkerboard pattern
- The rectangle is as close to square as possible (minimizing perimeter)

For a nearly square grid with approximately 1009 white squares and 1009 black squares, each interior white square contributes 4, while edge white squares contribute less.

From a theoretical analysis, the maximum sum would be: $55 \times 55 = 3025$

This corresponds to option (C) 3025.

Answer: C

# Inductive (Easy)

![](images/cbd2f7e3140808a83ca69a0a2e1610dd9a654841f60dae20b2433020e2adb7e1.jpg)

![](images/f9ae1628f4f27c67e42a309bcf565c5199ee7d184e1863bf889224ec5abf6921.jpg)
Figure 32: A sample error case of Inductive Reasoning (difficulty: Easy).

A.

![](images/094995f7fd6661fa2cf01e8206d18ba8f31705d31dd9700b64a1e8c398d45ed3.jpg)

B.

![](images/4d6f1428c39c2fafedb135682adbf4b8de0cd898e86e0609786958190341c596.jpg)

C.

![](images/5ebbe461b93a2cbe955c72fb377c52e5c4b6a02d084792aa8be5cc43571a02a0.jpg)

D.

Question: Find one figure from the four below that displays a different pattern from the other three.

# Ground Truth: (D)

Claude-3.7-Sonnet-Thinking: Let me analyze each figure to identify any patterns or differences among them:

Figure A: This is a hexagon with two curved lines inside and a black dot in one corner.
Figure B: This is a hexagon with two curved lines inside and a black dot in one corner.
Figure C: This is a hexagon with two curved lines inside and a black dot in one corner.
Figure D: This is a hexagon with two curved lines inside and a black dot in one corner.

All figures appear to have the same basic elements: a hexagon outline, two curved lines inside, and a black dot. Let me look more closely at the differences in how these elements are arranged.

Looking at the curved lines:

- Figure A: The two curved lines appear to form a single continuous curve that divides the hexagon.
- Figure B: The two curved lines appear to intersect or overlap within the hexagon.
- Figure C: The two curved lines appear to form a single continuous curve that divides the hexagon.
- Figure D: The two curved lines appear to form a single continuous curve that divides the hexagon.

Looking at the position of the dot:

- Figure A: The dot is in one corner of the hexagon.
- Figure B: The dot is in one corner of the hexagon.
- Figure C: The dot is in one corner of the hexagon.
- Figure D: The dot is in one corner of the hexagon.

Let me look even more closely at the arrangement of the curves:

- Figure A: The curves divide the hexagon into sections, with one section containing the dot.
- Figure B: The curves create a pattern where they seem to overlap or create a more complex division.
- Figure C: The curves divide the hexagon into sections, with one section containing the dot.
- Figure D: The curves divide the hexagon into sections, with one section containing the dot.

Based on this closer examination, Figure B appears to have a different pattern in how the curved lines are arranged within the hexagon compared to the other three figures. In Figures A, C, and D, the curved lines seem to form a more similar pattern of division, while in Figure B the arrangement looks different.

Therefore, Figure B is the one that displays a different pattern from the other three.

# Pattern Recognition Problem Analysis

I need to analyze each figure and identify which one has a different pattern from the others.
+ +Looking at all four figures: + +- All are hexagons +- All contain curved lines inside +- All have a single black dot +- All have two curved lines creating sections within the hexagon + +Let me examine the specific arrangement of these elements in each figure: + +## Figure A: + +- Two curved lines that create sections +- The black dot is positioned in the lower left corner +- The curves form a pattern that divides the hexagon into three sections + +Figure B: + +- Two curved lines that appear to overlap or intersect +- The black dot is positioned in the lower right corner +- The curves create a different division pattern compared to the other figures + +Figure C: + +- Two curved lines that create sections +- The black dot is positioned in the lower left corner +- The pattern of curve division is similar to Figure A + +Figure D: + +- Two curved lines that create sections +- The black dot is positioned in the lower right corner +- The pattern of curve division is similar to Figures A and C. + +The key difference is in how the curved lines are arranged. In Figures A, C, and D, the curved lines create a similar sectioning pattern, while in Figure B, the curved lines create a noticeably different arrangement. + +Answer: B + +Error Category: Reasoning Error + +Error Reason: The model makes a reasoning error by overemphasizing minor variations in the curve overlap while overlooking the more fundamental structural difference. Figure D is the only one where the black dot and curves are positioned differently, breaking the symmetry seen in A, B, and C. The model incorrectly identifies B as the outlier due to superficial curve complexity rather than recognizing the true deviation in spatial structure. + +# Inductive (Easy) + +![](images/afd0faec9feb947504d44ca25613ade5faa37fee1a4f3a557b8ced9f2809e7ed.jpg) + +Figure 33: A sample correct case of Inductive Reasoning (difficulty: Easy). +![](images/620f6d56098b109afe8ae4b63539ad8f4f4d3e720dff2c685ff3ad62e3d73bee.jpg) +A. 3.5.6.7 +B. +C. +D. + +Question: Analyze the pattern of changes in the figure below, and choose the appropriate fifth row: + +Ground Truth: (B) + +Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in this figure carefully. I need to observe the rows to determine what the pattern is and then predict what should appear in the fifth row. + +Row 1: + +Row 2: + +Row 3: + +Row 4: + +Looking at this pattern, I notice that: +- Each row has 7 symbols +Each row contains a mix of hearts $(\bigcirc)$ and flowers $(\bullet)$ +- There seems to be a shifting pattern + +I notice that if I track the position of the hearts $(\bigcirc)$ : + +-Row 1:positions 1,3,4 +-Row 2:positions 2,4,5 +-Row 3:positions 3,5,6 +-Row 4:positions 4,6,7 + +It looks like the hearts are shifting one position to the right in each row. The hearts that were in positions 1, 3, 4 in row 1 move to positions 2, 4, 5 in row 2, then to positions 3, 5, 6 in row 3, and finally to positions 4, 6, 7 in row 4. + +Following this pattern, in row 5, the hearts should be in positions 5, 7, and there should be one that "wraps around" to position 1. + +So the fifth row should be: $\bigcirc \bigcirc \bigcirc \bigcirc \bigcirc \bigcirc$ + +Now let me check the answer choices: + +A. $\bullet \bullet \bullet \bullet \bullet \bullet \bullet$ - This doesn't match +B.♥ - This matches my prediction +C. 
- This doesn't match
D. - This doesn't match

So the answer should be B.

Let me double-check by looking at the pattern of the flowers as well:
- Row 1: positions 2, 5, 6, 7
- Row 2: positions 1, 3, 6, 7
- Row 3: positions 1, 2, 4, 7
- Row 4: positions 1, 2, 3, 5

The flowers are in all positions where hearts are not. So in row 5, they should be in positions 2, 3, 4, 6.

That gives: hearts in positions 1, 5, 7 and flowers in positions 2, 3, 4, 6.

This confirms that option B is correct.

Analyzing the Pattern in the Figure

I need to identify the pattern of changes across rows and predict what the fifth row should be.

## Observation of the Pattern

Looking at the existing four rows:

- Row 1:
- Row 2:
- Row 3:
- Row 4:

## Pattern Analysis

I notice that the position of the hearts (♥) shifts in a consistent way:
- Row 1: Hearts in positions 1, 3, 4
- Row 2: Hearts in positions 2, 4, 5
- Row 3: Hearts in positions 3, 5, 6
- Row 4: Hearts in positions 4, 6, 7

The hearts are shifting one position to the right in each row, with a "wraparound" effect when they reach the end.

Predicting the Fifth Row

Following this pattern, in row 5, the hearts should be in positions:
- 5 (shifted from 4)
- 7 (shifted from 6)
- 1 (shifted from 7, wrapped around)

So the fifth row should be: hearts in positions 1, 5, and 7, with flowers in the remaining positions.

## Checking the Options

A. Not a match
B. This matches my prediction
C. Not a match
D. Not a match

Answer: B

# Inductive (Medium)

![](images/88905e754aef245617dad71ab60cf29cbaa7de0c2cdc7bfa6246797edc486a4a.jpg)

![](images/9b0cd4b7d45ea96e77e1a76fa635f68d59efca6ae415ddcea6116702d2a08391.jpg)
Figure 34: A sample error case of Inductive Reasoning (difficulty: Medium).

![](images/f43ef894cc362d7c0b9717dfbc06a4a9667abb2d5aff08d0cb8cfc765e949aa3.jpg)

A

B

C

D

Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern.

Ground Truth: (C)

Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the given sequence of figures and find what should come in place of the question mark.

The sequence shows hexagons with internal lines. Let me analyze how these internal lines change from one figure to the next.

First figure: A hexagon with a single line from the top vertex going down to about the middle of the hexagon.

Second figure: A hexagon with a single line from the upper-right vertex going to the middle-left of the hexagon.

Third figure: A hexagon with a single line from the bottom-right vertex going to the upper-left of the hexagon.

Fourth figure: A hexagon with a single line from the bottom-left vertex going to the upper-right of the hexagon.

So the pattern seems to be that the line rotates in a clockwise direction around the hexagon. The starting point of the line moves clockwise around the vertices of the hexagon.
+ +Given this pattern, the fifth figure (the one with the question mark) should have a line from the lower vertex (at the bottom) going up to the upper-right of the hexagon. + +Now let's check the options: + +Option A: A hexagon with a line from the left vertex going to the right vertex. + +Option B: A hexagon with a line from the upper-left vertex going to the lower-right. + +Option C: A hexagon with a line from the top vertex going to the bottom-right. + +Option D: A hexagon with a line from the top vertex going to the bottom. + +Looking at the pattern and the next expected figure, option A seems closest to what I'm expecting - a line going from the left side to the right side. Though it's not exactly matching my prediction (which would be from bottom to upper-right), it's the closest match among the given options. + +OK, I think I've been struggling because I don't have a completely clear view of the images. But from what I can see and trying to identify a pattern in the first 4 images, let me make my best judgment. + +The line inside the hexagon appears to be moving in some pattern as we go from one figure to the next. Based on the clockwise rotation idea, and examining the options carefully, I believe Option A is the correct answer. + + + +Analysis of Pattern in Hexagon Sequence + +I need to identify the pattern in the sequence of figures and determine which option should replace the question mark. + +Examining the Pattern + +Let me analyze the first four figures in the sequence: + +1. First figure: A hexagon with a line from the top vertex to the bottom-left side +2. Second figure: A hexagon with a line from the top-right vertex to the bottom side +3. Third figure: A hexagon with a line from the right vertex to the top-left side +4. Fourth figure: A hexagon with a line from the bottom-right vertex to the left side + +Looking at this sequence, I can see that the starting point of the line moves clockwise around the hexagon (from top $\rightarrow$ top-right $\rightarrow$ right $\rightarrow$ bottom-right). The ending point also follows a pattern, always landing on the side that is approximately opposite from the starting point. + +Predicting the Next Figure + +Following this pattern, the fifth figure should have a line starting from the bottom-left vertex (continuing the clockwise movement of the starting point). The line should end at the top or top-right side. + +Evaluating the Options + +Looking at the provided options: + +- Option A: A hexagon with a line from the left vertex to the right side +- Option B: A hexagon with a line from the upper-left vertex to the lower-right side +- Option C: A hexagon with a line from the upper-left vertex to the right side +- Option D: A hexagon with a line from the top vertex to the right side + +Option A is the only one that features a line starting from the left vertex, which is the closest to the bottom-left vertex that should be next in our pattern. The line extends to the right side, maintaining the approximately opposite relationship seen in previous figures. + +Answer: A + +Error Category: Reasoning Error + +Error Reason: The model correctly observes a clockwise movement but incorrectly predicts the direction and placement of the next line, choosing option A. The model fails to identify the angle of the movement pattern. 
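The clockwise-rotation hypothesis described in this transcript can be made concrete with a small sketch. The following Python snippet is purely illustrative and is not part of the benchmark or the model output; the integer vertex indices 0-5 are hypothetical stand-ins for the hexagon vertices, which appear only as images.

```python
# Illustrative sketch only (not from the benchmark or the model output).
# It encodes the transcript's hypothesis that "the starting point of the line
# moves clockwise by one vertex per figure", using hypothetical vertex
# indices 0..5 in place of the actual hexagon vertices.

def predicted_start_vertex(first_start: int, figure_index: int, n_vertices: int = 6) -> int:
    """Vertex index where the line starts in the given figure, under the hypothesis."""
    return (first_start + figure_index) % n_vertices

# If the first figure's line starts at vertex 0, the hypothesis predicts the
# start vertex advances one step per figure, so the fifth figure (index 4)
# should start at vertex 4.
print([predicted_start_vertex(0, k) for k in range(5)])  # [0, 1, 2, 3, 4]
```

A mismatch between such a prediction and the option actually chosen is exactly the kind of placement error the error analysis above describes.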
+

# Inductive (Medium)

![](images/739f5d92cd82927a2d7bdf4d2ab2309c4bc04f1b4fc7ed3709e892e83ee42b4e.jpg)

![](images/dabc692ef1c0c1082feaaf1e2462b37ee282600a2506e8c89411d6c3df9a1439.jpg)
①

![](images/833d68dbfe0935e410c0edebe6b9a388a975d4f0df03af384d4597f191c08d49.jpg)
②

![](images/61d4e7983c5dbd66a1aaf55dd7c03249b7db7e007c72d194c2194e20780a9fa2.jpg)
③

Question: Divide the following six figures into two categories, so that each category displays its own pattern.

Ground Truth: (D)

![](images/ad7496557f0542675336a310ca2059dadf3a471b7b68213fd05a31105358d979.jpg)
④
Figure 35: A sample correct case of Inductive Reasoning (difficulty: Medium).

![](images/34f92a4aa521689cf2d90b4f881ab0bffe6490c57e02a38f4cb90bd5f6129190.jpg)
⑤

![](images/d96d394318067b904a712079a7165288a420ae879a0d88f7529f2c1c573e3d0f.jpg)
⑥

A. ①③⑤, ②④⑥
B. ①④⑥, ②③⑤
C. ①③④, ②⑤⑥
D. ①②④, ③⑤⑥

Claude-3.7-Sonnet-Thinking: I need to analyze the six figures and find a pattern that divides them into two distinct categories. Let me examine each figure:

Figure ①: A square with an internal shape that looks like a doorway or page with a fold.
Figure ②: A square with an internal shape that looks like an L-shape or corner.
Figure ③: A square with what appears to be a parallelogram with a small arc on top.
Figure ④: A square with some zigzag or lightning-bolt-like shape inside.
Figure ⑤: A square with a circle that has an X or cross inside it.
Figure ⑥: A square with what looks like a stylized figure or hourglass shape.

Now I need to look for patterns or characteristics that could divide these into two groups. Some possible categorization criteria:

- Curved vs. straight lines
- Symmetrical vs. asymmetrical
- Simple vs. complex
- Open vs. closed shapes
- Single shape vs. multiple shapes

Let me try to apply these criteria:

Curved vs. straight lines:

- Figures with prominent curves: ⑤ (circle with X)
- Figures with mainly straight lines: ①, ②, ③, ④, ⑥

That doesn't divide evenly into two groups.

Symmetrical vs. asymmetrical:
- More symmetrical: ⑤ (circle with X), possibly ⑥
- Less symmetrical/asymmetrical: ①, ②, ③, ④

Simple vs. complex:

- Simpler shapes: ②, ⑤
- More complex shapes: ①, ③, ④, ⑥

Open vs. closed shapes inside the square:

- Closed shapes: ⑤ (circle)
- Open shapes: ①, ②, ③, ④, ⑥

Let me look at whether the internal shapes touch the outer square:

- Touching the outer square: ①, ②, ④, possibly ③ and ⑥
- Not touching the outer square: ⑤

Another possibility - shapes with internal divisions vs. shapes without:
- With internal divisions: ①, ④, ⑤
- Without internal divisions: ②, ③, ⑥

Let me check the answer choices and see if any of these patterns match:

A. ①③⑤, ②④⑥
B. ①④⑥, ②③⑤
C. ①③④, ②⑤⑥
D. ①②④, ③⑤⑥

Looking at option A: What could unite ①③⑤? They don't seem to share an obvious common pattern. What could unite ②④⑥? Again, not seeing an obvious pattern.

Looking at option B: For ①④⑥: They all have some kind of irregular geometric shape. For ②③⑤: They all seem to have more regular geometric elements.

Looking at option C: For ①③④: Not seeing a clear pattern. For ②⑤⑥: Not seeing a clear pattern.

Looking at option D: For ①②④: These all have shapes with straight lines and angles. For ③⑤⑥: These all have some curved elements - ③ has an arc on top, ⑤ is a circle, ⑥ has curved sides.

I think option D might be the answer.
The distinction seems to be: + +-Group $①②④$ Figures with only straight lines and angles + +- Group $③⑤⑥$ Figures that incorporate some curved elements + + + +Analyzing Six Geometric Figures + +I need to find a pattern that separates these six figures into two categories. Let me examine each figure carefully: + +Figure ①: Square with an internal shape that has straight edges (looks like a doorway or page) + +Figure ②: Square with an L-shaped internal figure made of straight lines +Figure ③: Square with a trapezoid/parallelogram shape that has a curved arc on top +Figure ④: Square with zigzag/lightning-shaped internal figure made of straight lines +Figure ⑤: Square with a circle (curved) containing an X +Figure ⑥: Square with what appears to be a stylized figure with curved edges + +Looking at potential pattern differences: + +Option D: ①②④, ③⑤⑥ + +- Group ①②④: All figures contain ONLY straight lines and angular shapes +- Group $③⑤⑥$ All figures incorporate curved elements (arc in $③$ circle in $⑤$ curved sides in $⑥$ +This is a clear and consistent pattern. The first group has exclusively straight-line geometry, while the second group all incorporate curved elements in their design. + +The other options don't reveal as clear a pattern distinction. + +Answer: D + +# Inductive (Hard) + +![](images/c6fc1a83d6a9bc93b8784359ac076f5ca84d132570716648ebe7672867c6bd45.jpg) +Figure 36: A sample error case of Inductive Reasoning (difficulty: Hard). + +![](images/1994f7acc55cf3c5043ee5cd9ca074533673ed5163c5a6430e8a63567a075735.jpg) + +![](images/b3a81caaab7e6d1a6308effe863bcf676eca4ca870e4b241ebb9c196a80297ae.jpg) +A B C D + +Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern. + +Ground Truth: (B) + +Claude-3.7-Sonnet-Thinking: Let me analyze this pattern recognition problem step by step. + +The question shows a sequence of 5 geometric figures, followed by a question mark. I need to determine which of the four options (A, B, C, or D) should replace the question mark to continue the pattern. + +# The sequence is: + +1. Two overlapping circles +2. A triangle inside a circle +3. A shape that looks like an oval with horizontal lines +4. A square with a triangle inside or a diagonal line +5. A pentagon (5-sided polygon) + +? (The figure we need to determine) + +# Options: + +A. A hexagon (6-sided polygon) +B. A rectangle with a diagonal line +C. A square with a grid/cross inside + +D. A triangle + +# Let me look for patterns: + +- If we're counting sides: circles (infinite sides), triangle (3 sides), oval (curvy, so infinite?), square/rectangle (4 sides), pentagon (5 sides) +- If looking at the number of sides, it might be increasing: $\infty, 3, \infty, 4, 5\ldots$ which would suggest 6 sides next (hexagon) +- Or it could be about complexity of the shapes or some transformation between them + +Looking at option A (hexagon), this would follow if the pattern is increasing the number of sides: pentagon (5 sides) $\rightarrow$ hexagon (6 sides). + +This seems like the most logical pattern - the progression of regular polygons with increasing number of sides: + +- Pentagon (5 sides) +- Hexagon (6 sides) +- Heptagon (7 sides) +- Octagon (8 sides) + +etc. + +So, the answer appears to be A (hexagon). + +Let me double-check if there's another pattern I'm missing: +- The shapes aren't consistently "shape within a shape" so that's not the pattern +- It's not about filled vs. 
unfilled shapes +- It doesn't appear to be about rotation or reflection + +The increasing number of sides seems to be the most consistent pattern: + +-∞ sides (circles) +-3 sides (triangle) +- Curved shape (represents $\infty$ again or could be a special case) +- 4 sides (square/rectangle) +- 5 sides (pentagon) +- ? (logically 6 sides, which would be a hexagon) + +Therefore, option A (hexagon) appears to be the correct answer. + + + +I need to identify the pattern in this sequence of figures and determine which option should replace the question mark. + +Let me analyze the sequence: + +1. Two overlapping circles +2. A triangle inside a circle +3. An oval with horizontal lines +4. A quadrilateral with a diagonal line +5.A pentagon (5-sided polygon) +6.2 (This is what I need to find) + +Looking at these shapes, I notice a potential pattern in the number of sides of the primary shapes: + +- First shape: Circles (infinite sides) +- Second shape: Triangle (3 sides) +- Third shape: Oval (curved shape) +- Fourth shape: Quadrilateral (4 sides) +- Fifth shape: Pentagon (5 sides) + +It appears the pattern is showing an increase in the number of sides as we move from the triangle to quadrilateral to pentagon. Following this pattern, the next shape should have 6 sides. + +Looking at the options: + +- Option A: Hexagon (6-sided polygon) +-Option B:Rectangle with diagonal line +- Option C: Square with grid +-Option D:Triangle + +Since the logical progression is to a 6-sided figure after the 5-sided pentagon, option A (hexagon) is the correct answer to continue the pattern. + +Answer: A + +# Error Category: Reasoning Error + +Error Reason: The model overcomplicates the pattern and misinterprets the progression of shapes. It assumes the sequence is based on increasing the number of polygon sides and selects the hexagon (option A), expecting a 6-sided figure to follow the 5-sided pentagon. However, the actual pattern does not follow this sequence, as could be reasoned from the first three figures. + +# Inductive (Hard) + +![](images/6dcacdbc633bf2d93fe3048943845a396a23ab6786fee3bb15fbaa39e85fab8c.jpg) + +![](images/17adf262a76fecf8616abb3242adf93be470dbd486216a32ecc0b909fe895b5c.jpg) +Figure 37: A sample correct case of Inductive Reasoning (difficulty: Hard). + +![](images/e9bc9fcac4a2371d182b0f22eb739189041818bfaeb2e9d2b079bab6c84fa652.jpg) + +Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern. + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: This question asks me to identify a pattern in a 3x3 grid of figures and then determine which of the four options (A, B, C, D) should replace the question mark to maintain that pattern. + +Let me analyze each row of the grid to understand the pattern: + +Row 1: Contains line drawings or outlines (thin lines). + +Row 2: Contains solid black shapes (filled in). + +Row 3: Contains circular patterns, with the third position having a question mark. + +Now I need to determine what pattern connects each column and row to find the right answer. 
+

Looking at each column:

- Column 1: A line drawing at top, a solid shape in middle, and a circular pattern at bottom

- Column 2: Same structure as column 1

- Column 3: Same structure for the first two rows, but the third position has the question mark

Now let's look more closely at the circular patterns in row 3:

- Position (3,1): Shows circles arranged in a pattern that seems to form two connected groups

- Position (3,2): Shows circles arranged in a different pattern, also with connected groups

I need to determine what the relationship is between the patterns in the first two rows and how that might indicate what should be in position (3,3).

Looking at row 1 again:

- (1,1): Lines forming an angular shape

- (1,2): Lines forming another angular shape

- (1,3): Lines forming a third angular shape with an X

Row 2:

- (2,1): Solid black angular shape

- (2,2): Solid black angular shape

- (2,3): Solid black angular shape

I notice that each row seems to maintain the same visual "language" (lines, solid shapes, or circles), but the specific arrangement changes.

Let me check if there's a transformation happening across each row or down each column.

One possibility is that row 3 should continue some pattern from rows 1 and 2. Looking at the options:

Option A: Circles arranged in a pattern

Option B: Circles arranged in a pattern

Option C: Circles arranged in a pattern

Option D: Circles arranged in a pattern

Looking purely at the row 3 pattern development:

- (3,1) has circles arranged in what appears to be a specific pattern

- (3,2) has circles arranged in a different pattern

- (3,3) should continue this progression in some logical way

Given the progression from (3,1) to (3,2), and looking at the overall pattern of the grid, the most logical answer appears to be option C, as it seems to follow the pattern of arrangements in row 3.

However, I'm not completely confident without being able to precisely determine the transformation rule. Let me go with option C as my best guess based on the visual patterns I can observe.

# Pattern Analysis for Figure Sequence

I need to identify the pattern in this $3 \times 3$ grid to determine what should replace the question mark.

Analyzing the rows:

1. **Row 1**: Contains simple line drawings (outlines)

2. **Row 2**: Contains solid black shapes (filled versions)

3. **Row 3**: Contains arrangements of circles

Analyzing the columns:

Each column follows a consistent transformation pattern:

- The top row shows a basic line pattern

- The middle row shows the same pattern filled in black

- The bottom row represents the same pattern using circles

## Looking at the third column:

- Position (1,3): A line pattern with an "X" and some angles

- Position (2,3): A solid version with square and triangular elements

- Position (3,3): Should show the same pattern represented with circles

Examining the options:

- **Option A**: Circles arranged in an L-shape with some outliers

- **Option B**: Circles arranged in a zigzag pattern

- **Option C**: Circles arranged in a figure-8 or loop pattern with outlying circles

- **Option D**: Circles arranged in a C-shape with additional elements

Looking at how the circle arrangements in positions (3,1) and (3,2) correspond to their respective shapes above them, option C best represents the pattern in the third column.
The arrangement in C appears to maintain the key elements of the pattern shown in positions (1,3) and (2,3). + +Answer: C + +# Spatial (Easy) + +![](images/80da2aa740657911acc5a646847ccea63c96b896cfdddfe6c9198ef82424b766.jpg) + +![](images/904965bbf4265f24d2577a52604da12422da4d054e267cebc6d8b1a6743bfb7f.jpg) +Figure 38: A sample error case of Spatial Reasoning (difficulty: Easy). + +Question: How many blocks are in this 3D tower? + +Options:A:7.B:8.C:9.D:10 + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: I need to count the number of blocks in this 3D tower. Let me analyze the structure carefully. + +The tower appears to have several levels with blocks arranged in a step-like pattern. I'll try to count methodically by identifying each visible block. + +Looking at the image: + +- At the bottom level (ground level), I can see 3 blocks arranged in an L-shape. +- At the second level up, I can see 2 blocks. +- At the top level, I can see 1 block. + +So far that's $3 + 2 + 1 = 6$ blocks. + +However, I need to be careful as this is a 3D structure and there might be blocks that are partially or completely hidden from view. + +Bottom level: I see what appears to be an L-shape with 3 visible cubes. There might be a 4th cube completing the square $(2\times 2)$ , but it's not clearly visible. + +Middle level: I see 2 cubes. + +Top level: I see 1 cube. + +Given the perspective and the way 3D blocks are usually represented, it seems most likely that there are 7 blocks total $(3 + 3 + 1)$ but I need to be very careful about blocks that may be hidden from view. + +Actually, let me count one more time carefully: + +1. Bottom front left block +2. Bottom front right block +3. Bottom back left block (creating the L-shape) +4.Middle front block +5.Middle back block +6. Top block + +7. Bottom back right block (which would complete the $2 \times 2$ square at the bottom) - this one is not clearly visible but would typically be present in a stable structure + +That gives 7 blocks. + +After careful consideration, I believe the answer is 7 blocks. + +Actually, I'm not confident about my count. Let me try to visualize this again: + +The structure appears to be: + +- Bottom layer: Looks like a $2 \times 2$ arrangement (4 blocks) + +- Middle layer: Appears to be 2 blocks + +-Top layer:1 block + +That would give us $4 + 2 + 1 = 7$ blocks total. + +But wait, the image shows what looks like steps. Let me try to count each visible cube: + +1. Bottom front left + +2. Bottom front right + +3.Middle left + +4.Middle right + +5. Top middle + +6. Bottom back left (partially visible) + +7. Bottom back right (might be hidden) + +Based on what I can see in the image, there appear to be 7 blocks, which corresponds to option (A). + + + +# Counting Blocks in the 3D Tower + +To solve this problem, I need to carefully count all the blocks in this 3D structure, including any that might be partially hidden from view. + +Let me work through this systematically by analyzing each level of the tower: + +Bottom Level (ground level): + +I can see what appears to be a $2 \times 2$ arrangement of blocks: + +That's 4 blocks on the bottom level. + +Middle Level: + +Moving up one level, I can see: + +- Middle left block (above the bottom front left) + +- Middle right block (above the bottom back left) + +That's 2 blocks on the middle level. + +Top Level: + +At the very top, there is: + +- One block positioned above the middle right block + +That's 1 block on the top level. 
+ +Total Count: + +Adding all levels: $4 + 2 + 1 = 7$ blocks total + +Therefore, there are 7 blocks in this 3D tower. + +Answer: A + +Error Category: Spatial / Orientation Error + +Error Reason: The model makes a spatial reasoning error by assuming only 3 blocks at the bottom level, + +overlooking a likely 4th hidden cube that would complete the L-shape into a full $2 \times 2$ square base. + +# Spatial (Easy) + +![](images/cefa652b64bca2cb3ded5a974bd666119b76399473475757d6e80f5058d67cca.jpg) + +![](images/1dca39e1bbec887164256f6110fefe845dd7ef508126048d7df6d8430b767d84.jpg) +Figure 39: A sample correct case of Spatial Reasoning (difficulty: Easy). + +Question: Some kids are facing towards front and others are facing towards back. How many kids are holding another kid's hand with their right hand? + +Options: A: 2. B: 6. C: 5. D: 1 + +Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: Let's analyze the image and the problem: + +In the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward. + +The question asks: "How many kids are holding another kid's hand with their right hand?" + +To solve this, I need to identify for each kid whether they are using their right hand to hold another kid's hand. + +# Going from left to right: + +1. Kid 1 (blonde hair, purple dress): This kid appears to be facing forward. Her right hand would be on her right side from our perspective. She's using her right hand to hold the hand of Kid 2. +2. Kid 2 (gray hair): Appears to be facing forward. Using right hand to hold Kid 3's hand. +3. Kid 3 (green outfit): Appears to be facing forward. Using right hand to hold Kid 4's hand. +4. Kid 4 (black outfit): Appears to be facing backward (away from us). From Kid 4's perspective, their right hand would be on our left. So Kid 4 is using their right hand to hold Kid 3's hand. +5. Kid 5 (pink hair, yellow skirt): Appears to be facing backward. Using right hand to hold Kid 4's hand. +6. Kid 6 (light green/mint hair): Appears to be facing forward. Using right hand to not hold anyone (end of chain). + +So, counting the kids who are using their right hand to hold another kid's hand: + +Kid 1: Yes + +Kid 2: Yes + +Kid 3: Yes + +Kid 4: Yes + +Kid 5: Yes + +Kid 6: No (end of chain) + +That gives us 5 kids using their right hand to hold another kid's hand. + +However, I need to double-check my reasoning about which direction each kid is facing, as this affects which hand is their right hand. + +# Looking more carefully: + +- Kid 1 (blonde): Facing forward, right hand holding Kid 2's hand. Yes. +- Kid 2 (gray hair): Facing forward, right hand holding Kid 3's hand. Yes. +- Kid 3 (green outfit): Facing forward, right hand holding Kid 4's hand. Yes. +- Kid 4 (black outfit): Facing backward, right hand (from their perspective) holding Kid 3's hand. Yes. +- Kid 5 (pink hair): Facing backward, right hand holding Kid 4's hand. Yes. +- Kid 6 (mint hair): Facing forward, right hand not holding anyone's hand. No. + +So, there are 5 kids holding another kid's hand with their right hand. + +# The answer should be (C) 5. + +# + +I need to determine which kids are holding another kid's hand with their right hand by analyzing the image and understanding each child's orientation. + +Looking at the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward. + +Let me analyze each child from left to right: + +1. 
First child (blonde girl in purple dress): Facing forward. She's using her right hand to hold the second child's hand. +2. Second child (gray hair): Facing forward. Using right hand to hold the third child's hand. +3. Third child (green outfit): Facing forward. Using right hand to hold the fourth child's hand. +4. Fourth child (black outfit): Facing backward. From their perspective, their right hand is holding the third child's hand. +5. Fifth child (pink hair in yellow skirt): Facing backward. Their right hand is holding the fourth child's hand. +6. Sixth child (light-colored hair): Facing forward. This child is at the end of the chain on the right, so their right hand is not holding anyone's hand. + +Counting the kids who are using their right hand to hold another kid's hand: 1st, 2nd, 3rd, 4th, and 5th kids = 5 children total. + +# Spatial (Medium) + +![](images/14b366f4185c02e93ebe4219a693c366bf81475a9410c98c0e1a41b887c3316c.jpg) + +![](images/8e7a8efbb9442d3da6e11d1452e1c3d6dfed84fbbc0dca5848c6be76f2f510cd.jpg) + +![](images/1639cb2aa2bb59d6aaebd4716fe3ca69f37c2ae86b1df24cdb81e1da99dfd062.jpg) +Figure 40: A sample error case of Spatial Reasoning (difficulty: Medium). + +![](images/9b686a95436f9a02fa717ed03cad01e1e16f1c7cc446ebdb14615dd0b806dd7a.jpg) + +![](images/adf5076b567b494c09c346d232e7291ede55ef3f81babce364269d00e4cdb522.jpg) + +![](images/bea696fa24270a2378f3ea6791555d4d9e3504dd156e7e882d77f1f7fd86dcd8.jpg) + +![](images/fd296ebae832def4298738b303e6e0ecdc616dbe78e9eb939acb6ded360c9ebd.jpg) + +Question: Which cube can be made based on the unfolded cube? + +Ground Truth: (A) + +# Claude-3.7-Sonnet-Thinking: + +Let me analyze the folded cube first to understand what I'm looking for: +- The cube appears to be mostly gold/yellow with black triangular patterns on some faces +- I can see 3 faces of the cube: +- Front face: Has a black triangular pattern pointing downward +- Top face: Has a black triangular pattern pointing to the right +-Right face: Has a black triangular pattern pointing to the left + +Now I need to examine each unfolded pattern to see which one would create these patterns when folded into a cube. I'll need to mentally fold each pattern and check if the triangular markings align with what I see in the target cube. + +# For option (A): + +The unfolded pattern shows: + +- A T-shape with triangular patterns on some faces +- When I mentally fold this, the triangular patterns don't seem to align with the target cube. The arrangement doesn't match what we see in the folded cube. + +# For option (B): + +The unfolded pattern has: +- A cross-like shape with triangular patterns +- When folded, this arrangement might create the patterns we see in the target cube. I need to check this more carefully. + +# For option (C): + +This unfolded pattern has: +- An L-shape with additional squares and triangular patterns +- When mentally folded, the patterns don't seem to match the target cube. + +# For option (D): + +This unfolded pattern has: +- A cross-like shape with triangular patterns in different orientations +- When mentally folded, I need to check if this could create the target cube. + +Let me think about this more methodically. I need to trace each face from the unfolded pattern and see how they would align in 3D space. 
+ +# Looking at option (B) more carefully: + +If I consider the cross-shape as the standard cube net, with the center becoming the front face, then: +- The center face has a triangular pattern pointing downward, which matches the front face of the target cube +- The face to the right of center would become the right face, and it has a triangular pattern that would point left when folded, matching the target +- The face above the center would become the top face, and it has a triangular pattern that would point right when folded, matching the target +This seems to be a match for the folded cube shown at the top. + +Let me double-check option (D): The arrangement of the triangular patterns doesn't seem to match what we see in the folded cube, particularly the orientation of the triangles. + +# I think option (B) is the correct answer. + + + +Analyzing Which Unfolded Pattern Creates the Target Cube + +I need to determine which of the unfolded cube patterns (a, b, c, d) would create the folded cube shown at the top when assembled. + +# Examining the Target Cube + +First, I'll identify what's visible on the folded cube: +- Front face: Has a black triangular pattern pointing downward +- Top face: Has a black triangular pattern pointing to the right +- Right face: Has a black triangular pattern pointing to the left + +Analyzing Each Unfolded Pattern + +Option (a): When folded, the triangular patterns wouldn't match the configuration in the target cube. The orientations and positions of the triangles don't align correctly. + +# Option (b): If I mentally fold this pattern: + +- The central face (with downward-pointing triangle) would be the front face +- The face above it (with rightward-pointing triangle) would become the top face +- The face to the right of center (with leftward-pointing triangle) would become the right face +This perfectly matches the pattern seen in the target cube. + +Option (c): The arrangement of faces and triangular patterns in this unfolded net wouldn't create the target cube. The orientations of the triangles don't match when folded. + +Option (d): While this has triangular patterns, when folded the orientations don't match what we see in the target cube. + +Therefore, option (b) is the only unfolded pattern that would create the target cube when folded. + +Answer: B + +# Error Category: Spatial / Orientation Error + +Error Reason: The model incorrectly visualizes how the triangular patterns on the unfolded net align when folded into a cube. It selects option B, mistakenly believing the triangle orientations will match the folded view, but fails to correctly account for how the triangles rotate and face different directions when assembled. + +# Spatial (Medium) + +![](images/5173ddc5146d9cfeb2d74a7a9f9bb884fa2ce8df452523de82ced9c0974ae68a.jpg) +Figure 41: A sample correct case of Spatial Reasoning (difficulty: Medium). + +![](images/65301e5a16dcfa61b2c31b244def2e72eb53721f2d5f98a279f1ff866ed35b6e.jpg) + +![](images/c8fb5f58b9700cf16ecc6cb3575f9894069c4c885cbea362069dec97779eacbd.jpg) + +![](images/10bcbe79e5c7c5d962a5e38863c039aae6fad5ad4603da5cacbc3e1b9c18cc88.jpg) + +![](images/2bbfec971459982369375ff4238e17715565acb6e3858aa51c607ea4534d78fc.jpg) + +Question: Paulo took a rectangular sheet of paper, yellow on one side and green on the other side and, with several folds shown in the dotted lines in the figure below, made a little paper plane. To give the airplane a charm, Paulo made a circular hole, marked on the last figure. 
After playing a lot with the plane, Paulo unfolded the sheet and realized that there were several holes in it. How many holes did he count? + +Options: A: 6. B: 7. C: 8. D: 9 + +# Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: Let me analyze this problem step by step. + +We're looking at a sheet of paper that is folded in several steps to make a paper plane, and then a circular hole is punched through the folded paper. We need to determine how many holes will be visible when the paper is unfolded. + +From the image, I can see: + +1. A rectangular yellow sheet of paper (yellow on one side, green on the other) +2. The paper is folded along dotted lines in several steps +3. In the final paper plane configuration, a circular blue hole is punched through + +When a hole is punched through multiple layers of folded paper, it creates holes in each layer. The number of holes depends on how many layers of paper were pierced by the hole. + +Looking at the folding process: + +- Second image: Top portion folded to create a triangular shape (green side now showing on top) +- Third image: Side portions folded (more green showing) +- Fourth image: Further folding +- Fifth image: Final paper plane with a blue circular hole + +To determine the number of holes, I need to figure out how many layers of paper were at the location where the hole was punched in the final configuration. + +It's a bit difficult to precisely determine from just these diagrams, but I can make a reasonable analysis: + +In a typical paper airplane, the center fold creates 2 layers, and then additional folds along the sides can create more layers. The hole appears to be punched near the tip of one wing. + +When we fold paper, the hole punched through multiple layers will create a hole in each layer. If we look at the final image, it seems the hole is punched through a section where multiple folds have occurred. + +Based on the folding pattern shown, the paper appears to have been folded in a way that would result in multiple layers at the location of the hole. + +Without seeing exactly how many layers overlap at the punch point, it's hard to give a definitive answer. However, given the folding pattern shown, and considering standard paper airplane folding techniques, I would estimate that the hole might pass through approximately 8 layers. + +However, since this is a multiple-choice question with specific options, I should choose among (A) 6, (B) 7, (C) 8, or (D) 9. + +Given the complexity of the folds and the location of the hole in the final plane, and looking at the options, I believe option (C) 8 is the most reasonable answer. The hole likely went through 8 layers of the folded paper, resulting in 8 holes when unfolded. + +Actual, let me think about this more carefully. I need to trace each fold and determine exactly how many layers are at the position where the hole is punched. + +The hole appears to be near one tip of the final paper plane. Based on the folding pattern: + +1. The initial fold creates a triangle on top (1 fold, 2 layers) +2. Then side folds create more layers +3. The final folds create the completed plane + +Typical paper airplanes can have different numbers of layers at different points. The tip might have 8 layers if several folds pass through that area. + +Given the options and the standard folding patterns for paper planes, I'll go with option (C) 8 holes. 
+ + + +Counting Holes in the Unfolded Paper + +I need to determine how many holes appear when Paulo unfolds his paper plane after making a single circular hole in the folded plane. + +Analyzing the Folding Process + +Looking at the image sequence: +1. We start with a flat rectangular sheet that's yellow on one side and green on the other +2. The paper goes through several folding steps (shown by dotted lines) +3. In the final paper plane configuration, a single circular hole (blue) is punched through + +Why Multiple Holes Appear + +When a hole is punched through multiple layers of folded paper, it creates a hole in each layer. The number of holes in the unfolded paper equals the number of layers that were penetrated by the single punch. + +## Counting the Layers + +Following the folding process carefully and looking at the final plane: + +- The initial rectangular sheet is folded several times +- Each fold creates multiple overlapping layers +- The hole appears to be punched through the right wing of the completed plane + +Based on the specific folding pattern shown in the diagrams, when the sheet is completely folded into the paper plane configuration, the location where the hole is punched consists of 8 layers of paper stacked together. + +Therefore, when Paulo unfolds the paper, he would find 8 holes in the sheet - one for each layer that was penetrated by the single punch. + +Answer: C + +# Spatial (Hard) + +![](images/a04ebb8a43502409b5fd77351daf18ccb85f6f642ae25c7224e556230afbcbc6.jpg) + +![](images/14c6ef28685649387593825d78b70250342fe608769574e9273cfde733c2252b.jpg) +Figure 42: A sample error case of Spatial Reasoning (difficulty: Hard). + +![](images/aab8e64fbb9a68e5fd7bb86a766ebaf859645b89ced9f8bad955ead33c881c8a.jpg) + +Question: Betty walked around the park once, starting from the marked point in the direction of the arrow. She took 4 pictures. In which order did she take the pictures? + +Options: A: 1234. B: 1324. C: 2143. D: 2341 + +# Ground Truth: (C) + +Claude-3.7-Sonnet-Thinking: This problem requires me to determine the order in which Betty took 4 pictures as she walked around a park. I need to look at the diagram and figure out the sequence. + +First, I'll analyze what I see in the leftmost diagram, which seems to show the park layout: + +- There's a cylindrical object (like a column or tree trunk) +- There's a cone-shaped object (like a pine tree or monument) +- There's a dome-shaped object (like a small hill or rock) +- There's an arrow indicating the direction Betty walked + +Then I have 4 numbered pictures, each showing different arrangements of these objects: +- Picture 1: Shows the dome shape on the left and the cylinder on the right +- Picture 2: Shows the cone shape on the left and the cylinder on the right +- Picture 3: Shows the cylinder on the left and the dome shape on the right. +- Picture 4: Shows the cylinder on the left and both the dome shape and cone shape on the right + +I need to determine the order in which Betty would see these views as she walks around the park in the direction of the arrow. + +Looking at the main diagram, the arrow indicates she's walking counterclockwise around the park. + +If I start at the marked point and move in the direction of the arrow (counterclockwise): + +1. Betty would first see the dome on her left and the cylinder on her right - this matches Picture 1 +2. As she continues counterclockwise, she would next see the cone on her left and the cylinder on her right - this matches Picture 2 +3. 
Continuing further, she would see the cylinder on her left and the dome on her right - this matches Picture 3 +4. Finally, completing the circle, she would see the cylinder on her left with both the dome and cone on her right - this matches Picture 4 + +So the order of the pictures would be 1, 2, 3, 4, which corresponds to option (A) 1234. + +Let me double-check this logic: + +From the starting position and moving counterclockwise: + +- First view: dome on left, cylinder on right (Picture 1) + +- Second view: cone on left, cylinder on right (Picture 2) + +- Third view: cylinder on left, dome on right (Picture 1) + +- Fourth view: cylinder on left, dome and cone on right (Picture 4) + +This confirms that the answer is (A) 1234. + + + +# Determining Picture Order in Betty's Park Walk + +I need to figure out the order in which Betty took 4 pictures as she walked once around the park in the direction of the arrow. + +Analyzing the Park Layout + +From the first diagram, I can see: + +- A cylindrical object (looks like a column/tree trunk) +- A cone-shaped object (like a pointed tree or monument) +- A dome-shaped object (like a small hill or rock) +- An arrow indicating counterclockwise movement + +Analyzing Each Picture + +1. **Picture 1**: Shows the dome shape on left, cylindrical object on right +2. **Picture 2**: Shows the cone shape on left, cylindrical object on right +3. **Picture 3**: Shows the cylindrical object on left, dome shape on right +4. **Picture 4**: Shows the cylindrical object on left, both dome and cone shapes on right + +Determining the Sequence + +Starting from the marked point and moving counterclockwise (following the arrow): +1. First, Betty would see the dome on her left and the cylinder on her right $\rightarrow^{**}$ Picture $1^{**}$ +2. Continuing counterclockwise, she would next see the cone on her left and the cylinder on her right $\rightarrow^{**}$ Picture $2^{**}$ +3. Moving further, she would see the cylinder on her left and the dome on her right $\rightarrow^{**}$ Picture 3\*\* +4. Finally, she would see the cylinder on her left with both the dome and cone on her right $\rightarrow^{**}$ Picture $4^{**}$ + +This gives us the sequence 1-2-3-4. + +Answer: A + +Error Category: Spatial / Orientation Error + +Error Reason: The model makes a spatial orientation error by incorrectly interpreting the shifting perspectives as Betty walks around the park, especially at the starting point. It assumes a consistent viewpoint but fails to mentally rotate the scene to match what Betty would actually see from each new position. + +# Spatial (Hard) + +![](images/2586382fa16ad27a1d1de419769a9996fc1643298d2473d0ee193d5899f640c7.jpg) + +![](images/6e1c6e63d19fe7d1bf2557d0f987c2866548f050e00b0b561dc19978100bddba.jpg) +Figure 43: A sample correct case of Spatial Reasoning (difficulty: Hard). + +A + +![](images/b7644b24bb6cc17803106504397075a33326ffdc8d6e7a7d2ceb81edbe695538.jpg) + +B + +![](images/988fe318a4b806c51614855dc118b5905c5628e9d1a730257062c2d4125fef59.jpg) + +C + +![](images/b762364e4fbdadb57bcfa6d9c604c9302b6522c57bd98dcd1c60d88d2028f45b.jpg) + +D + +Question: From the flat expansion view of paper boxes, which paper box is different from the other three? + +Ground Truth: (A) + +Claude-3.7-Sonnet-Thinking: This problem is asking me to identify which one of the four flat expansions of paper boxes is different from the others. Let me analyze each one carefully. + +These patterns represent unfolded 3D boxes (nets), where the squares would fold to create a 3D shape. 
The black squares and circles represent some kind of marking or pattern on the surface of the box. + +Let me analyze the structure of each expansion: + +A: This appears to be a linear arrangement of 5 squares with a T-shape extension. The pattern has black regions in various squares. +B: This is a more complex shape with 7 squares arranged in a cross-like pattern. It has black regions in various squares. +C: This also appears to be an arrangement of squares that could fold into a box, with black markings. +D: This is another cross-shaped arrangement of squares with black markings. + +To determine which one is different, I need to see if they would all fold into valid 3D boxes and if there's any pattern that sets one apart. + +Let me think about how these would fold: +- A cube has 6 faces, so a valid net should have exactly 6 squares +- The squares must be connected in a way that allows folding into a cube + +Let me count the squares in each: + +A: I count 5 squares +B: I count 7 squares +C: I count 7 squares +D:1 count 7 squares + +Looking at the number of squares, A has 5 squares while B, C, and D each have 7 squares. A cube needs 6 faces, so none of these would fold into a perfect cube. A is different in that it has fewer squares than the others. + +However, let me look more carefully at these patterns to see if there's another difference. + +Actually, let me recount: + +A: Looking more carefully, I see 6 squares arranged in a T-pattern with an extension +B: I see 7 squares arranged in a cross-pattern with extensions +C: I see 7 squares arranged in a pattern +D: I see 7 squares arranged in a cross-pattern + +So option A has 6 squares, while B, C, and D each have 7 squares. Since a cube has 6 faces, A is the only one that could potentially fold into a proper cube. The others have an extra square. + +Let me check once more carefully: + +A: I count squares from left to right and top to bottom: 1, 2, 3, 4, 5, 6. So there are 6 squares. +B: I count 7 squares +C: I count 7 squares. +D: I count 7 squares. + +So based on the number of squares, A is different from the others because it has 6 squares while the others have 7 squares. + +But let me think about whether there could be another difference that's more subtle, like the pattern of black regions or the connectivity of the squares. + +Looking at the patterns of black and white regions, they all seem to have black regions and circles in different arrangements. Looking at the connectivity of the squares, they all have different patterns of how the squares connect. + +I think the key difference is the number of squares. A has 6 squares which is the exact number needed for a cube. B, C, and D all have 7 squares, which is one too many for a standard cube - these would need to have one square overlapping or not used when folded. + +So my answer is A. + + + +Analysis of Paper Box Flat Expansions + +I need to determine which of these flat expansions (nets) is different from the others by examining their structure and properties. + +First, let me count the number of squares in each option: +- Option A: 6 squares arranged in an L-shape with an extension +- Option B: 7 squares arranged in a cross pattern with extensions +- Option C: 7 squares arranged in a T-shape with extensions +- Option D: 7 squares arranged in a cross pattern with extensions + +The key difference I notice is that option A has exactly 6 squares, while all the other options (B, C, and D) have 7 squares each. 
+ +This is significant because a cube has exactly 6 faces, so a proper net for a cube should have exactly 6 squares. Options B, C, and D all have one extra square, which means they would either: + +1. Not fold into a proper cube +2. Have an overlapping square when folded +3. Require leaving one square unused + +Therefore, option A is different from the other three because it's the only one with the correct number of squares (6) to form a standard cube. + +Answer: A \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10342/images/00e50a1568abcd54465fc320e5e371cbde25a42d012dacc8fef4c74e1687b8cd.jpg b/data/2025/2504_10xxx/2504.10342/images/00e50a1568abcd54465fc320e5e371cbde25a42d012dacc8fef4c74e1687b8cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c70d2fc9a93ada8f151309e12c69c7c372d84bb9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/00e50a1568abcd54465fc320e5e371cbde25a42d012dacc8fef4c74e1687b8cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89ad1d0a3a3f816124f9da024a9144ef435dfb15beba501bf7e9c3b4befaab8f +size 1143 diff --git a/data/2025/2504_10xxx/2504.10342/images/01e60a93178e8267e318f99e39c4cca1b82b8f7d2705c3cfe0ae49ccbac99c55.jpg b/data/2025/2504_10xxx/2504.10342/images/01e60a93178e8267e318f99e39c4cca1b82b8f7d2705c3cfe0ae49ccbac99c55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/01e60a93178e8267e318f99e39c4cca1b82b8f7d2705c3cfe0ae49ccbac99c55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/085189f854c325765c80bfc7f114017ecfa432448aa0ef26f6e006d72565f3fc.jpg b/data/2025/2504_10xxx/2504.10342/images/085189f854c325765c80bfc7f114017ecfa432448aa0ef26f6e006d72565f3fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70c3a30592f61f201781fd749b47d0182b68867e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/085189f854c325765c80bfc7f114017ecfa432448aa0ef26f6e006d72565f3fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e0f272e12af7d87574155ce6ec5b7f2861e63d800d6119ebe30845b47de3371 +size 19672 diff --git a/data/2025/2504_10xxx/2504.10342/images/090c5d20e7dbcee00cc16fc3ac0259c5d9be49d302ec9a733870fb9c076fd3e8.jpg b/data/2025/2504_10xxx/2504.10342/images/090c5d20e7dbcee00cc16fc3ac0259c5d9be49d302ec9a733870fb9c076fd3e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f8a346ffe4131bff4eec52adba0fa071d557bce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/090c5d20e7dbcee00cc16fc3ac0259c5d9be49d302ec9a733870fb9c076fd3e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:708ccdaba1fd0cecc558462c395907235d05c6d63f948bd05ccbe8421af975f9 +size 10380 diff --git a/data/2025/2504_10xxx/2504.10342/images/0920247e737549ac6c013dd4ad786a1d0456780992c80a82459f78cbac53edd4.jpg b/data/2025/2504_10xxx/2504.10342/images/0920247e737549ac6c013dd4ad786a1d0456780992c80a82459f78cbac53edd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6dae5bbc7ff61e55d82e84c1832829f19aa932a8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/0920247e737549ac6c013dd4ad786a1d0456780992c80a82459f78cbac53edd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7f9a60accfb4a93e4deeaa7db00fa207a64aa1d182e992bdf0e57d5e759ba639 +size 736 diff --git a/data/2025/2504_10xxx/2504.10342/images/094995f7fd6661fa2cf01e8206d18ba8f31705d31dd9700b64a1e8c398d45ed3.jpg b/data/2025/2504_10xxx/2504.10342/images/094995f7fd6661fa2cf01e8206d18ba8f31705d31dd9700b64a1e8c398d45ed3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..878f2f0820143f9fc8a032826572f459b72ee304 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/094995f7fd6661fa2cf01e8206d18ba8f31705d31dd9700b64a1e8c398d45ed3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84514e7b8fdc028963a8d5ab5229471bbfdb122b1f36e8ae11e8589414096fa7 +size 1869 diff --git a/data/2025/2504_10xxx/2504.10342/images/097011d809433a79022988b41408051bc1ba5980c55cdc75b274a14638a0f3d4.jpg b/data/2025/2504_10xxx/2504.10342/images/097011d809433a79022988b41408051bc1ba5980c55cdc75b274a14638a0f3d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a964a6fd1630c54296a01cec9085ce81aaff7ac8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/097011d809433a79022988b41408051bc1ba5980c55cdc75b274a14638a0f3d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cedab27c7aa64b7398d66785002ac04d016140784a9f3b343cc45f2d90bdc155 +size 13936 diff --git a/data/2025/2504_10xxx/2504.10342/images/0c9852c365df3b1611c71ccabfed0cc26a574ebf7515ee1488d453c71195d0e9.jpg b/data/2025/2504_10xxx/2504.10342/images/0c9852c365df3b1611c71ccabfed0cc26a574ebf7515ee1488d453c71195d0e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aace3f671a7cd3ca8361474031ab69abcad9b4ab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/0c9852c365df3b1611c71ccabfed0cc26a574ebf7515ee1488d453c71195d0e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94772801bbe636025184144701034819b1e4dd1f7946197f6400cbb961caa7de +size 4495 diff --git a/data/2025/2504_10xxx/2504.10342/images/0ee04f55113fc5aace0ec41a462c21b3615295a732c880c148e7b6f41902a4d8.jpg b/data/2025/2504_10xxx/2504.10342/images/0ee04f55113fc5aace0ec41a462c21b3615295a732c880c148e7b6f41902a4d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..474b39d79bdcbe5b1dd7403068b017ced61d51cb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/0ee04f55113fc5aace0ec41a462c21b3615295a732c880c148e7b6f41902a4d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1493abcc6b38aa89769b356462dcbdd81ae0bb6db0a8da1029683d8020fdeee6 +size 3185 diff --git a/data/2025/2504_10xxx/2504.10342/images/10bcbe79e5c7c5d962a5e38863c039aae6fad5ad4603da5cacbc3e1b9c18cc88.jpg b/data/2025/2504_10xxx/2504.10342/images/10bcbe79e5c7c5d962a5e38863c039aae6fad5ad4603da5cacbc3e1b9c18cc88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a707d65b7d688cc1e8e32e94d1585789f712f546 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/10bcbe79e5c7c5d962a5e38863c039aae6fad5ad4603da5cacbc3e1b9c18cc88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1797a720b269b32df7051664a5ff85cf47ae61a0bed038e585bf5f1d4432cbb2 +size 1526 diff --git a/data/2025/2504_10xxx/2504.10342/images/10bed72f87c8e193995a37a5c5e14a781a3051a3d0f0558297a496509e6fabfd.jpg b/data/2025/2504_10xxx/2504.10342/images/10bed72f87c8e193995a37a5c5e14a781a3051a3d0f0558297a496509e6fabfd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..accf9d98511f3a81db90bf708682cafe787a17ca --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/10bed72f87c8e193995a37a5c5e14a781a3051a3d0f0558297a496509e6fabfd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c27e3d8e5f5b87e27be6ebe2ddaa4ee74870166a6ef9c77f1dc6597ca4c3b832 +size 4353 diff --git a/data/2025/2504_10xxx/2504.10342/images/13c356b90f683f73e24d1a4dadc822c871d32f6cf7b204e3c8cfa4b0e7433dad.jpg b/data/2025/2504_10xxx/2504.10342/images/13c356b90f683f73e24d1a4dadc822c871d32f6cf7b204e3c8cfa4b0e7433dad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c1968e10e3d843b73965c3043ae36662e917851 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/13c356b90f683f73e24d1a4dadc822c871d32f6cf7b204e3c8cfa4b0e7433dad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:108114b9db0396a1650bf505fef44d268ac996a099c413e232a109b5c8f62cda +size 18424 diff --git a/data/2025/2504_10xxx/2504.10342/images/14ab2b1c55d80d22512d08289291b338fbb40e27ab710ad313761a8d7b7d6cb2.jpg b/data/2025/2504_10xxx/2504.10342/images/14ab2b1c55d80d22512d08289291b338fbb40e27ab710ad313761a8d7b7d6cb2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1b78b668d38c5ae71b777231d8e70bebaa2ca85 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/14ab2b1c55d80d22512d08289291b338fbb40e27ab710ad313761a8d7b7d6cb2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7ca0fcae3d9eb6b0e957227d435c882c29c7126a34f44dd094a99624e3c3b66 +size 8589 diff --git a/data/2025/2504_10xxx/2504.10342/images/14b366f4185c02e93ebe4219a693c366bf81475a9410c98c0e1a41b887c3316c.jpg b/data/2025/2504_10xxx/2504.10342/images/14b366f4185c02e93ebe4219a693c366bf81475a9410c98c0e1a41b887c3316c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/14b366f4185c02e93ebe4219a693c366bf81475a9410c98c0e1a41b887c3316c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/14c6ef28685649387593825d78b70250342fe608769574e9273cfde733c2252b.jpg b/data/2025/2504_10xxx/2504.10342/images/14c6ef28685649387593825d78b70250342fe608769574e9273cfde733c2252b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ce5cece14358fca45cd5e6cbbbcc3b4278b4fee --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/14c6ef28685649387593825d78b70250342fe608769574e9273cfde733c2252b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd474d1d6a0ef6ec8e06a9f19c4add44239e5d0b8de27d7aabb7c208163a1b4 +size 3375 diff --git a/data/2025/2504_10xxx/2504.10342/images/1639cb2aa2bb59d6aaebd4716fe3ca69f37c2ae86b1df24cdb81e1da99dfd062.jpg b/data/2025/2504_10xxx/2504.10342/images/1639cb2aa2bb59d6aaebd4716fe3ca69f37c2ae86b1df24cdb81e1da99dfd062.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09384f923331f9cbe3f9a3fbbc8263317e5e77f3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/1639cb2aa2bb59d6aaebd4716fe3ca69f37c2ae86b1df24cdb81e1da99dfd062.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79fdc47f869b09d58396cb3a21e5c34d30d86973bbc3c0d9f5de2e9f631ca2cd +size 6610 diff --git a/data/2025/2504_10xxx/2504.10342/images/1662aa990cd75de3f0e4e15080707ff2d3e5d543c8a719718bfdc6202b92da36.jpg 
b/data/2025/2504_10xxx/2504.10342/images/1662aa990cd75de3f0e4e15080707ff2d3e5d543c8a719718bfdc6202b92da36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6e44687fcaa276b043bcbccc2bf32d55a6b1304 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/1662aa990cd75de3f0e4e15080707ff2d3e5d543c8a719718bfdc6202b92da36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8194b6f9e2d5197014350b0386113d93fff7799d6c6e904b2c8bcc52b84172b2 +size 7360 diff --git a/data/2025/2504_10xxx/2504.10342/images/16fa6aae6f434bae2f7c980d27911263d2f0d6dcdde43098f76431b41a04006d.jpg b/data/2025/2504_10xxx/2504.10342/images/16fa6aae6f434bae2f7c980d27911263d2f0d6dcdde43098f76431b41a04006d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/16fa6aae6f434bae2f7c980d27911263d2f0d6dcdde43098f76431b41a04006d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/17adf262a76fecf8616abb3242adf93be470dbd486216a32ecc0b909fe895b5c.jpg b/data/2025/2504_10xxx/2504.10342/images/17adf262a76fecf8616abb3242adf93be470dbd486216a32ecc0b909fe895b5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65e360b338e728aac64828f93adf8bc6ecd9bc57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/17adf262a76fecf8616abb3242adf93be470dbd486216a32ecc0b909fe895b5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdb76987034654084a7b2d12ac0ca43826d1efb3d249e744005a1f7de12f2a8b +size 7043 diff --git a/data/2025/2504_10xxx/2504.10342/images/1994f7acc55cf3c5043ee5cd9ca074533673ed5163c5a6430e8a63567a075735.jpg b/data/2025/2504_10xxx/2504.10342/images/1994f7acc55cf3c5043ee5cd9ca074533673ed5163c5a6430e8a63567a075735.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80579a07cb6264893a0c7654fe90154e41d2d2dd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/1994f7acc55cf3c5043ee5cd9ca074533673ed5163c5a6430e8a63567a075735.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:466ed218bbeafedeb7702de46648561f49e5b409bcab14bc3f7b6eca5c5189aa +size 6997 diff --git a/data/2025/2504_10xxx/2504.10342/images/1ab5d39c309739928dbdbad3db6a7908d5d62c04378b3c3dfc2525e0114c5e01.jpg b/data/2025/2504_10xxx/2504.10342/images/1ab5d39c309739928dbdbad3db6a7908d5d62c04378b3c3dfc2525e0114c5e01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec1111fa31310af2e739b5e76485e3b6f7573bd2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/1ab5d39c309739928dbdbad3db6a7908d5d62c04378b3c3dfc2525e0114c5e01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5106cf74fe8256e203f647e7fde06c9afbac41ba4bd9d47236b8d6daf4928b4c +size 9143 diff --git a/data/2025/2504_10xxx/2504.10342/images/1dca39e1bbec887164256f6110fefe845dd7ef508126048d7df6d8430b767d84.jpg b/data/2025/2504_10xxx/2504.10342/images/1dca39e1bbec887164256f6110fefe845dd7ef508126048d7df6d8430b767d84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6711b6c555f48601d502fb66149a6425b4f957c1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/1dca39e1bbec887164256f6110fefe845dd7ef508126048d7df6d8430b767d84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c817c58698b71df64ea705dc3cf3fc164f6493435d05fe6f4fbdb8fb33218619 +size 11589 diff --git a/data/2025/2504_10xxx/2504.10342/images/1ed8ee9704d40204c4a09619e1b5a1f5ac09cbf72757ce1849a071e4e1de96c6.jpg b/data/2025/2504_10xxx/2504.10342/images/1ed8ee9704d40204c4a09619e1b5a1f5ac09cbf72757ce1849a071e4e1de96c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f3a717104210ef2288d3a30f484a8b2bb44b26f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/1ed8ee9704d40204c4a09619e1b5a1f5ac09cbf72757ce1849a071e4e1de96c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d2e034058fdeeb0374c9c5a28eae52dabfc5502291872c1434c41e8d2a13b99 +size 21598 diff --git a/data/2025/2504_10xxx/2504.10342/images/22e8f2fd45414ea0c02565acbba730dd18db188566730e74d38f7073ac825006.jpg b/data/2025/2504_10xxx/2504.10342/images/22e8f2fd45414ea0c02565acbba730dd18db188566730e74d38f7073ac825006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f7eb7d23f2a360324df712d1ca19b83daf82ff1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/22e8f2fd45414ea0c02565acbba730dd18db188566730e74d38f7073ac825006.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a317c6c97e28da824e9ee87d9e9791b80c8be9b6edfb61592c4e5af6421c6b79 +size 1240 diff --git a/data/2025/2504_10xxx/2504.10342/images/257c05296d2ad268a2d7c6e59b7a42d0258eec902ec9535ec6137f13a615e6da.jpg b/data/2025/2504_10xxx/2504.10342/images/257c05296d2ad268a2d7c6e59b7a42d0258eec902ec9535ec6137f13a615e6da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/257c05296d2ad268a2d7c6e59b7a42d0258eec902ec9535ec6137f13a615e6da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/2586382fa16ad27a1d1de419769a9996fc1643298d2473d0ee193d5899f640c7.jpg b/data/2025/2504_10xxx/2504.10342/images/2586382fa16ad27a1d1de419769a9996fc1643298d2473d0ee193d5899f640c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2586382fa16ad27a1d1de419769a9996fc1643298d2473d0ee193d5899f640c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/26d9b479b586179672f0a391c3433b4482c8e8deeca5515d3b7db271c2635940.jpg b/data/2025/2504_10xxx/2504.10342/images/26d9b479b586179672f0a391c3433b4482c8e8deeca5515d3b7db271c2635940.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27162602716d4e5ed6eeec21e05ed73158bf302b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/26d9b479b586179672f0a391c3433b4482c8e8deeca5515d3b7db271c2635940.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3bdccbcec8c8cf89dec8d1c4c85c3c49b9e068079f8d38ca52c68a4a646d82e +size 39174 diff --git a/data/2025/2504_10xxx/2504.10342/images/27c1ab9e19183598421b47bc2b2cde9cfa0eca5758064122469fefc4dd9aa1ec.jpg b/data/2025/2504_10xxx/2504.10342/images/27c1ab9e19183598421b47bc2b2cde9cfa0eca5758064122469fefc4dd9aa1ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbe29f591958686781e2a0f2e61b3076a0afef14 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/27c1ab9e19183598421b47bc2b2cde9cfa0eca5758064122469fefc4dd9aa1ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bcbfa627b2713619b8ba81568c7c37659c570d9a93e30c0856db92c37f281b6 +size 836 diff --git a/data/2025/2504_10xxx/2504.10342/images/2bbfec971459982369375ff4238e17715565acb6e3858aa51c607ea4534d78fc.jpg b/data/2025/2504_10xxx/2504.10342/images/2bbfec971459982369375ff4238e17715565acb6e3858aa51c607ea4534d78fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f04ef9563ecad140ad530d92b89dafce736690b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2bbfec971459982369375ff4238e17715565acb6e3858aa51c607ea4534d78fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fbba7ce544cc4b56bdffaaf3e048dfc125d3bc135871e93add9e1fd3455839b +size 1459 diff --git a/data/2025/2504_10xxx/2504.10342/images/2c357401c5509f0dfd36a58990216a6180a467958210780c06b3d61a2f2e3160.jpg b/data/2025/2504_10xxx/2504.10342/images/2c357401c5509f0dfd36a58990216a6180a467958210780c06b3d61a2f2e3160.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02a9f5d1c6e6a59773791b4380b317bd7393d00c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2c357401c5509f0dfd36a58990216a6180a467958210780c06b3d61a2f2e3160.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c3416c066615b018f796bf0467305097fda59aaf8bba631ef6633f6937736ae +size 1293 diff --git a/data/2025/2504_10xxx/2504.10342/images/2d3a21ad802b33bf71c70e0abd227d85a77a0230e56bf9bc88272a39454b0e56.jpg b/data/2025/2504_10xxx/2504.10342/images/2d3a21ad802b33bf71c70e0abd227d85a77a0230e56bf9bc88272a39454b0e56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e777c2fe451d249e07aab390007b300398f8255 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2d3a21ad802b33bf71c70e0abd227d85a77a0230e56bf9bc88272a39454b0e56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ce0f479ab87858e6ea6a841663882e660610133053bf08c3a97b14c82396218 +size 4528 diff --git a/data/2025/2504_10xxx/2504.10342/images/2d8c23c6642e4fdc059fce954cbe74392aa42e50ac339d36ec570b4538d1b9cd.jpg b/data/2025/2504_10xxx/2504.10342/images/2d8c23c6642e4fdc059fce954cbe74392aa42e50ac339d36ec570b4538d1b9cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9aa3966f90017dd921fd6675aad604767d41055f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2d8c23c6642e4fdc059fce954cbe74392aa42e50ac339d36ec570b4538d1b9cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68fb78159b4888a06903bd06c638298bdb045798e1e094947efedea6b870f0fd +size 3140 diff --git a/data/2025/2504_10xxx/2504.10342/images/2df83bc62266ef79c8c7d72021db90c5bdc93adf7f3d635def5ee2e1de1692b1.jpg b/data/2025/2504_10xxx/2504.10342/images/2df83bc62266ef79c8c7d72021db90c5bdc93adf7f3d635def5ee2e1de1692b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e320bb404a2c6eba0de219a1866438ac7ca816bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2df83bc62266ef79c8c7d72021db90c5bdc93adf7f3d635def5ee2e1de1692b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abb0451e0a08160dbc187b4a20f73b5981b1788a80d21d0c9b75aaa2ae9e69fe +size 32733 diff --git a/data/2025/2504_10xxx/2504.10342/images/2eb08ceca733cd8c85e3edca9cf46721d82bbaf628b9e06926a75a16c7a708b2.jpg 
b/data/2025/2504_10xxx/2504.10342/images/2eb08ceca733cd8c85e3edca9cf46721d82bbaf628b9e06926a75a16c7a708b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82696abf4e1403e992adf0c94234967b29aafdb3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/2eb08ceca733cd8c85e3edca9cf46721d82bbaf628b9e06926a75a16c7a708b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd093d7eaef7ea7a9e90eed4e8424f367945bb8cd9325ef89d4f57c80fe252f6 +size 28225 diff --git a/data/2025/2504_10xxx/2504.10342/images/33212890af4e16181bfc285e43be24966ae928823fec25db8cb92706b2348fe9.jpg b/data/2025/2504_10xxx/2504.10342/images/33212890af4e16181bfc285e43be24966ae928823fec25db8cb92706b2348fe9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec9ad232565a3fdda4faea5573de4e7d85bf74ff --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/33212890af4e16181bfc285e43be24966ae928823fec25db8cb92706b2348fe9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bf592c92dc45725184c4038b1582fa4bf8949ed98f58cb4dc6bee954a496b08 +size 6415 diff --git a/data/2025/2504_10xxx/2504.10342/images/34dd8ccdd69dd7fd4e38f6df7a7500a4d561d79b031d7cf70a372c6481e767f5.jpg b/data/2025/2504_10xxx/2504.10342/images/34dd8ccdd69dd7fd4e38f6df7a7500a4d561d79b031d7cf70a372c6481e767f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8345ab044fac7126ed060ce918e59cdffa042bb9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/34dd8ccdd69dd7fd4e38f6df7a7500a4d561d79b031d7cf70a372c6481e767f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e4d181226fceebc862c363da9c499980241fe46cf05d493d9ab21ac01c2782c +size 1959 diff --git a/data/2025/2504_10xxx/2504.10342/images/34f92a4aa521689cf2d90b4f881ab0bffe6490c57e02a38f4cb90bd5f6129190.jpg b/data/2025/2504_10xxx/2504.10342/images/34f92a4aa521689cf2d90b4f881ab0bffe6490c57e02a38f4cb90bd5f6129190.jpg new file mode 100644 index 0000000000000000000000000000000000000000..258ecfb2425574cf59260ddfc87b9e253ee47a33 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/34f92a4aa521689cf2d90b4f881ab0bffe6490c57e02a38f4cb90bd5f6129190.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56c0ff629888633dab7f296c79a2ac3f2339788e8848edf1fcfd67d75a043676 +size 1768 diff --git a/data/2025/2504_10xxx/2504.10342/images/363631807b6384d0673ac736965367e311b38d5638c70e070bdfb37313c8d221.jpg b/data/2025/2504_10xxx/2504.10342/images/363631807b6384d0673ac736965367e311b38d5638c70e070bdfb37313c8d221.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2987884e05e4b05d37c090145cd54437c5901862 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/363631807b6384d0673ac736965367e311b38d5638c70e070bdfb37313c8d221.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7f2405a81a455d585e2110225450e513e621d0118f96c95ee97861036467ca1 +size 37460 diff --git a/data/2025/2504_10xxx/2504.10342/images/37189ecbd81689a89efc1f56cd6a8ccc393d42226fa9319b2192c3ead6b3c5f1.jpg b/data/2025/2504_10xxx/2504.10342/images/37189ecbd81689a89efc1f56cd6a8ccc393d42226fa9319b2192c3ead6b3c5f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b21d65229081ddce776c030aa9cb0821363397a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/37189ecbd81689a89efc1f56cd6a8ccc393d42226fa9319b2192c3ead6b3c5f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e0933fa5401c1dce8b3b9b2d06987d066c2254ca48cd9d25cd9ab80066dde0b5 +size 14464 diff --git a/data/2025/2504_10xxx/2504.10342/images/37364e6ffedfd53d5cf11984921f5c210b210bd629162fe84fa6477d697c9de7.jpg b/data/2025/2504_10xxx/2504.10342/images/37364e6ffedfd53d5cf11984921f5c210b210bd629162fe84fa6477d697c9de7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04845bca6ea7e78da7f3cd2da51ed3a8cb3d42b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/37364e6ffedfd53d5cf11984921f5c210b210bd629162fe84fa6477d697c9de7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87a5c718c5fa67ba3146087cc1fce34a49ca30d141aec0f6b2fbb271f83b2ed5 +size 742 diff --git a/data/2025/2504_10xxx/2504.10342/images/3737d6ab696a3c310dab5c98fb390eedc23b20fda6553c6b5310acdd5f1eabeb.jpg b/data/2025/2504_10xxx/2504.10342/images/3737d6ab696a3c310dab5c98fb390eedc23b20fda6553c6b5310acdd5f1eabeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72f082795b9142da44780842afcc18f861145314 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/3737d6ab696a3c310dab5c98fb390eedc23b20fda6553c6b5310acdd5f1eabeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af8952c900379668a0f019492ff7aad3dfef3c5dd7bfdd5c78f95fb60ff64db4 +size 15998 diff --git a/data/2025/2504_10xxx/2504.10342/images/3baa80789cf99e4f68fa4353ef1538bb9d8872ab955ae28a990f5460c196ae12.jpg b/data/2025/2504_10xxx/2504.10342/images/3baa80789cf99e4f68fa4353ef1538bb9d8872ab955ae28a990f5460c196ae12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40ffca05e623bd37c181a2e96a62cb8d70659624 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/3baa80789cf99e4f68fa4353ef1538bb9d8872ab955ae28a990f5460c196ae12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4afff3cc1c3d26d8c615dc778797e198d6181637c4c0370ab469dc402ef01009 +size 73761 diff --git a/data/2025/2504_10xxx/2504.10342/images/422bc896f6eb461b560d28a6bcdb7b46675ce08dc157a69ee3ea72239fe20f5f.jpg b/data/2025/2504_10xxx/2504.10342/images/422bc896f6eb461b560d28a6bcdb7b46675ce08dc157a69ee3ea72239fe20f5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49d2bdc6a192f4f1e5cfc1df1f8cb08f9c9b440c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/422bc896f6eb461b560d28a6bcdb7b46675ce08dc157a69ee3ea72239fe20f5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fea654c9001753cd9dafa9eaadd2fcbdad0f4d8b8d6aa20288e7cbbfe701cf27 +size 24656 diff --git a/data/2025/2504_10xxx/2504.10342/images/424714a384e3ef3140a976266073aef801742fb243abbc70eecfcc5dd6030446.jpg b/data/2025/2504_10xxx/2504.10342/images/424714a384e3ef3140a976266073aef801742fb243abbc70eecfcc5dd6030446.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e186b70835c85991ee1faf3dfcea20dec2de7b11 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/424714a384e3ef3140a976266073aef801742fb243abbc70eecfcc5dd6030446.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b05ce372422769782ded855a4c27480054ea2be9d89ada8ad165f0cd99d42122 +size 1157 diff --git a/data/2025/2504_10xxx/2504.10342/images/4798f6c391395f4a6803778aab8ae4f3f1792d475d51602ec2691394a3f0b0da.jpg b/data/2025/2504_10xxx/2504.10342/images/4798f6c391395f4a6803778aab8ae4f3f1792d475d51602ec2691394a3f0b0da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3845bffb717064ce23aa8abd75e0f210cf3e2207 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/4798f6c391395f4a6803778aab8ae4f3f1792d475d51602ec2691394a3f0b0da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ed4ae4716f067a1615e8ddb2d69057da09a8e5ce0d1c63e2936eab2bf4ef425 +size 5341 diff --git a/data/2025/2504_10xxx/2504.10342/images/4cfbe405921837246d7d60cac33ea7bec0b336bb12e4e84a9fe4589d15475253.jpg b/data/2025/2504_10xxx/2504.10342/images/4cfbe405921837246d7d60cac33ea7bec0b336bb12e4e84a9fe4589d15475253.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5abd600563b2581c8a15f5ca075780376a6ba541 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/4cfbe405921837246d7d60cac33ea7bec0b336bb12e4e84a9fe4589d15475253.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:629105380e2c4761c73369db17f95eb7ec5596846b294df7bbcda8302e48743a +size 54894 diff --git a/data/2025/2504_10xxx/2504.10342/images/4d6f1428c39c2fafedb135682adbf4b8de0cd898e86e0609786958190341c596.jpg b/data/2025/2504_10xxx/2504.10342/images/4d6f1428c39c2fafedb135682adbf4b8de0cd898e86e0609786958190341c596.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98bd859f0423d9c3adfabbb96502601cd3d55fcc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/4d6f1428c39c2fafedb135682adbf4b8de0cd898e86e0609786958190341c596.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:586885f292d683378acfeecd1311c59baa8c7545cf88e97026531b88842962b0 +size 1908 diff --git a/data/2025/2504_10xxx/2504.10342/images/50a2de794b432270d8a6b8a4ca03979f06543861e5246d77de2f2322303eec57.jpg b/data/2025/2504_10xxx/2504.10342/images/50a2de794b432270d8a6b8a4ca03979f06543861e5246d77de2f2322303eec57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..286ff4332d795699f61e8653211c52fb9fea114e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/50a2de794b432270d8a6b8a4ca03979f06543861e5246d77de2f2322303eec57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dab877acf37d46f62dac33aa9efd06e5929e8ab39440afd63681cca108b2e2c0 +size 1302 diff --git a/data/2025/2504_10xxx/2504.10342/images/5173ddc5146d9cfeb2d74a7a9f9bb884fa2ce8df452523de82ced9c0974ae68a.jpg b/data/2025/2504_10xxx/2504.10342/images/5173ddc5146d9cfeb2d74a7a9f9bb884fa2ce8df452523de82ced9c0974ae68a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..982b3116a7bce2ce8b1bd2b23b713a05b176340a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/5173ddc5146d9cfeb2d74a7a9f9bb884fa2ce8df452523de82ced9c0974ae68a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48fc459b55a3c76c08535560966911067282d8a0a8a56b5150826f0e0ffd2bc7 +size 1777 diff --git a/data/2025/2504_10xxx/2504.10342/images/523a7fd5906283d19e4b6b98b68b5d07b68a0003d35bf321828d973d89158391.jpg b/data/2025/2504_10xxx/2504.10342/images/523a7fd5906283d19e4b6b98b68b5d07b68a0003d35bf321828d973d89158391.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3572efed2803ace3bf8302864a6feee42ac80711 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/523a7fd5906283d19e4b6b98b68b5d07b68a0003d35bf321828d973d89158391.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6a72146e5813589b2321a649038ff162d83174c6fc782d4176b72e6b3ba0939 +size 12911 diff --git a/data/2025/2504_10xxx/2504.10342/images/56840636c47c1f5df7225bd5590c34bae5d99f28eed1a5afd5b2806ace902ec9.jpg 
b/data/2025/2504_10xxx/2504.10342/images/56840636c47c1f5df7225bd5590c34bae5d99f28eed1a5afd5b2806ace902ec9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d171819e66d6c2a89e13d0ea67ab389d7ca782e9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/56840636c47c1f5df7225bd5590c34bae5d99f28eed1a5afd5b2806ace902ec9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74395aad420e418b518de1bf92c5ac656f5507d3f285bd89870ff70c726363dd +size 1353 diff --git a/data/2025/2504_10xxx/2504.10342/images/56af6eb71d6e2339d73f592d622135add9ce4337cf2f468cd37ce3f84689bcee.jpg b/data/2025/2504_10xxx/2504.10342/images/56af6eb71d6e2339d73f592d622135add9ce4337cf2f468cd37ce3f84689bcee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/56af6eb71d6e2339d73f592d622135add9ce4337cf2f468cd37ce3f84689bcee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/574525c730940cd3774fe74971f3aadf225b9b9840f7ef41a6416ea224eee334.jpg b/data/2025/2504_10xxx/2504.10342/images/574525c730940cd3774fe74971f3aadf225b9b9840f7ef41a6416ea224eee334.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26c50c2b0e37eca4a17998fc53bde7317f1f1f22 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/574525c730940cd3774fe74971f3aadf225b9b9840f7ef41a6416ea224eee334.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b1e7b24034106012582923ed2fdec9f95589486d462894523a916e17933766a +size 9826 diff --git a/data/2025/2504_10xxx/2504.10342/images/583d34c288f215b97b70b7361ce23897f12f100ad60e17ac83d9a8aebba2de98.jpg b/data/2025/2504_10xxx/2504.10342/images/583d34c288f215b97b70b7361ce23897f12f100ad60e17ac83d9a8aebba2de98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/583d34c288f215b97b70b7361ce23897f12f100ad60e17ac83d9a8aebba2de98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/5a9945fb2950cbab9e5972ec413e60b62a9b0274be1ab32e1b5d8735f9bb79f7.jpg b/data/2025/2504_10xxx/2504.10342/images/5a9945fb2950cbab9e5972ec413e60b62a9b0274be1ab32e1b5d8735f9bb79f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0fc011c03ef7d0d53fb07801d8dffc7003678626 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/5a9945fb2950cbab9e5972ec413e60b62a9b0274be1ab32e1b5d8735f9bb79f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:913982a76396ebcd65f97be280de1763f49e7ee45c73c955db993948f4d4e517 +size 22953 diff --git a/data/2025/2504_10xxx/2504.10342/images/5abe78030b7037208b41af28065a5b555d55d83ad1baf370ba0a12ff565461b1.jpg b/data/2025/2504_10xxx/2504.10342/images/5abe78030b7037208b41af28065a5b555d55d83ad1baf370ba0a12ff565461b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..041e4cb5b3a8b33fa8b6c2a2000eb11d88c203ad --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/5abe78030b7037208b41af28065a5b555d55d83ad1baf370ba0a12ff565461b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:838e3f11046f6e8d32dac3d5356ef49b6648705f1faf87ff31d9dd38f01d6ba0 +size 11137 diff --git a/data/2025/2504_10xxx/2504.10342/images/5c75fc66ff632914871c1c14e47d85c5dbae9791373244a7835cb9bafd5595e0.jpg b/data/2025/2504_10xxx/2504.10342/images/5c75fc66ff632914871c1c14e47d85c5dbae9791373244a7835cb9bafd5595e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e569ed879967c30948b3e7ec85d6ba568b861d14 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/5c75fc66ff632914871c1c14e47d85c5dbae9791373244a7835cb9bafd5595e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9219d04225c33e03586b17ad082e60f0c315ea4e096bf6db191749f45903ee0a +size 4089 diff --git a/data/2025/2504_10xxx/2504.10342/images/5ebbe461b93a2cbe955c72fb377c52e5c4b6a02d084792aa8be5cc43571a02a0.jpg b/data/2025/2504_10xxx/2504.10342/images/5ebbe461b93a2cbe955c72fb377c52e5c4b6a02d084792aa8be5cc43571a02a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06fa04fa5a90e4a1bab028062ef630d310f4407b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/5ebbe461b93a2cbe955c72fb377c52e5c4b6a02d084792aa8be5cc43571a02a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47eb3d4d196e3a011c67ea929c6b99bec7001a7b7fa43c81157c0b059204000f +size 1875 diff --git a/data/2025/2504_10xxx/2504.10342/images/60a6175bfd05a6f7af089e7517aaca78aec0f7e4c49b2736be538da2e6e2dcfa.jpg b/data/2025/2504_10xxx/2504.10342/images/60a6175bfd05a6f7af089e7517aaca78aec0f7e4c49b2736be538da2e6e2dcfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7ce8a46c3a60cbcc836dbfaa6d83384b4677aef --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/60a6175bfd05a6f7af089e7517aaca78aec0f7e4c49b2736be538da2e6e2dcfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9047d7c041e64a2680c5c176b56a9a0b453f720776471398a80ddecd5e4bb080 +size 14291 diff --git a/data/2025/2504_10xxx/2504.10342/images/60adf99283738943138682dd44b10b03153727886a6da65dcb8737ff516f26e7.jpg b/data/2025/2504_10xxx/2504.10342/images/60adf99283738943138682dd44b10b03153727886a6da65dcb8737ff516f26e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/60adf99283738943138682dd44b10b03153727886a6da65dcb8737ff516f26e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/610adf6221d802cd88a3f2515ae82304ccb0bf5468106f467cd6d6c333f8fa7f.jpg b/data/2025/2504_10xxx/2504.10342/images/610adf6221d802cd88a3f2515ae82304ccb0bf5468106f467cd6d6c333f8fa7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c16dcf412dcd1b8bdfb21ceecfb14221614967e2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/610adf6221d802cd88a3f2515ae82304ccb0bf5468106f467cd6d6c333f8fa7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ecbfd5e67f4fad519bab835b3e522e8600f7205b87450531e5ecaf84834ef48 +size 5371 diff --git a/data/2025/2504_10xxx/2504.10342/images/61b015d2546abf578760e8cc3932dd392875f94640dc490a388206fc830df894.jpg b/data/2025/2504_10xxx/2504.10342/images/61b015d2546abf578760e8cc3932dd392875f94640dc490a388206fc830df894.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20e9b959310d7c2eba81fb88ce862e4150cba962 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/61b015d2546abf578760e8cc3932dd392875f94640dc490a388206fc830df894.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dab8cb45846e7c929c6c337b62cb33591cccd1d479d6496bea4bec3cf42060a9 +size 11567 diff --git a/data/2025/2504_10xxx/2504.10342/images/61cad466b49690891b856ba14cc70440f36a35be4a0a036b722eeab6b87a80a2.jpg b/data/2025/2504_10xxx/2504.10342/images/61cad466b49690891b856ba14cc70440f36a35be4a0a036b722eeab6b87a80a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/61cad466b49690891b856ba14cc70440f36a35be4a0a036b722eeab6b87a80a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/61d4e7983c5dbd66a1aaf55dd7c03249b7db7e007c72d194c2194e20780a9fa2.jpg b/data/2025/2504_10xxx/2504.10342/images/61d4e7983c5dbd66a1aaf55dd7c03249b7db7e007c72d194c2194e20780a9fa2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af46c4a23284aa44b9244765ef1d0290a35d6ef6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/61d4e7983c5dbd66a1aaf55dd7c03249b7db7e007c72d194c2194e20780a9fa2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01b767d8b1181b3150a1d2a26640b5f682be6f34551e466e89378f90672c9dd0 +size 1632 diff --git a/data/2025/2504_10xxx/2504.10342/images/61f9deaf7571114247539cebe087ca5c23f67840862b57d8b656849458a44f08.jpg b/data/2025/2504_10xxx/2504.10342/images/61f9deaf7571114247539cebe087ca5c23f67840862b57d8b656849458a44f08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/61f9deaf7571114247539cebe087ca5c23f67840862b57d8b656849458a44f08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/620f6d56098b109afe8ae4b63539ad8f4f4d3e720dff2c685ff3ad62e3d73bee.jpg b/data/2025/2504_10xxx/2504.10342/images/620f6d56098b109afe8ae4b63539ad8f4f4d3e720dff2c685ff3ad62e3d73bee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d0da600177696e536a81e0999b5e48051c1e829 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/620f6d56098b109afe8ae4b63539ad8f4f4d3e720dff2c685ff3ad62e3d73bee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e53f35ed8d6c76f5d45241d118807a66be1ce8f7aa1d4f86801ab3ee5c6f9cb0 +size 9889 diff --git a/data/2025/2504_10xxx/2504.10342/images/64cd8ec62f766c056e33cbfafa74cd1d10500a34f2b993be19215152727c7859.jpg b/data/2025/2504_10xxx/2504.10342/images/64cd8ec62f766c056e33cbfafa74cd1d10500a34f2b993be19215152727c7859.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7c9cd1be7aaf588ccd3f68a0d8dc2c03b026d89 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/64cd8ec62f766c056e33cbfafa74cd1d10500a34f2b993be19215152727c7859.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f6df092575106c1f0f2552c3aa3d15401ed866a8a89a21c958a5b031c0ab592 +size 189327 diff --git a/data/2025/2504_10xxx/2504.10342/images/65301e5a16dcfa61b2c31b244def2e72eb53721f2d5f98a279f1ff866ed35b6e.jpg 
b/data/2025/2504_10xxx/2504.10342/images/65301e5a16dcfa61b2c31b244def2e72eb53721f2d5f98a279f1ff866ed35b6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7dd42d841e06baeb63e484cc45f06bc2ab35d0a3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/65301e5a16dcfa61b2c31b244def2e72eb53721f2d5f98a279f1ff866ed35b6e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca76aa3fc53339a7e498909e8aad3b0e1e22b4f574b3b64886d7747e82c208b3 +size 1950 diff --git a/data/2025/2504_10xxx/2504.10342/images/6d90ec4e3b20979b3232678bb8d97c8893f3506db7b9b76a3694d3800717de81.jpg b/data/2025/2504_10xxx/2504.10342/images/6d90ec4e3b20979b3232678bb8d97c8893f3506db7b9b76a3694d3800717de81.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82347f3332f0233b4749f4e75c075a9b63634f86 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/6d90ec4e3b20979b3232678bb8d97c8893f3506db7b9b76a3694d3800717de81.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab6ee966504bf2aae8577d0e5eaf4faf2aefc6ea35d42a51f3129b1e43ae9cfe +size 2533 diff --git a/data/2025/2504_10xxx/2504.10342/images/6dcacdbc633bf2d93fe3048943845a396a23ab6786fee3bb15fbaa39e85fab8c.jpg b/data/2025/2504_10xxx/2504.10342/images/6dcacdbc633bf2d93fe3048943845a396a23ab6786fee3bb15fbaa39e85fab8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/6dcacdbc633bf2d93fe3048943845a396a23ab6786fee3bb15fbaa39e85fab8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/6e1c6e63d19fe7d1bf2557d0f987c2866548f050e00b0b561dc19978100bddba.jpg b/data/2025/2504_10xxx/2504.10342/images/6e1c6e63d19fe7d1bf2557d0f987c2866548f050e00b0b561dc19978100bddba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c071f0dc41b884784448f5fdb382b990f4ece571 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/6e1c6e63d19fe7d1bf2557d0f987c2866548f050e00b0b561dc19978100bddba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:717a24c3ac4f5e8838d382060e6329a2185a8d8068a46d7fc220cddf0fa91500 +size 1921 diff --git a/data/2025/2504_10xxx/2504.10342/images/6e550321c6f928d460bbfa8999034f2fb0a676f1d34b0fb01b756caa063ddc83.jpg b/data/2025/2504_10xxx/2504.10342/images/6e550321c6f928d460bbfa8999034f2fb0a676f1d34b0fb01b756caa063ddc83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e591e5df150074d040edefac48bb8553045cc29 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/6e550321c6f928d460bbfa8999034f2fb0a676f1d34b0fb01b756caa063ddc83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fba550103407de8366f911e8a1bb4d166a48c27a46e8746bc0fe39ebfcd7463e +size 10464 diff --git a/data/2025/2504_10xxx/2504.10342/images/6ff5da0dfd71ea7192bbc4b256b148718f303bd5f12d4f39bba8c9a93445e556.jpg b/data/2025/2504_10xxx/2504.10342/images/6ff5da0dfd71ea7192bbc4b256b148718f303bd5f12d4f39bba8c9a93445e556.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccb1c901152b72ec5e42d0f6368ceb2ba444b52d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/6ff5da0dfd71ea7192bbc4b256b148718f303bd5f12d4f39bba8c9a93445e556.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ba819b04b81f22b8689795279fe9a981eb49085f9da6cb4f081c1d75d511404e +size 8955 diff --git a/data/2025/2504_10xxx/2504.10342/images/70f13531e69835c70feace1f0c5d4cb9f407929b98d21c7bf77f8b7957c853c2.jpg b/data/2025/2504_10xxx/2504.10342/images/70f13531e69835c70feace1f0c5d4cb9f407929b98d21c7bf77f8b7957c853c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8da74765c9ff3f524110da491969dcc808ecab06 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/70f13531e69835c70feace1f0c5d4cb9f407929b98d21c7bf77f8b7957c853c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dbbae25b2a7ad938f065807f1478b2c18bec9005c1557937efb56e1e683104f +size 26944 diff --git a/data/2025/2504_10xxx/2504.10342/images/734da95c949dd5902eb030ed4246a550a978e842247fed2f79fc705914fc1123.jpg b/data/2025/2504_10xxx/2504.10342/images/734da95c949dd5902eb030ed4246a550a978e842247fed2f79fc705914fc1123.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/734da95c949dd5902eb030ed4246a550a978e842247fed2f79fc705914fc1123.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/739f5d92cd82927a2d7bdf4d2ab2309c4bc04f1b4fc7ed3709e892e83ee42b4e.jpg b/data/2025/2504_10xxx/2504.10342/images/739f5d92cd82927a2d7bdf4d2ab2309c4bc04f1b4fc7ed3709e892e83ee42b4e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/739f5d92cd82927a2d7bdf4d2ab2309c4bc04f1b4fc7ed3709e892e83ee42b4e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/7461112c409fd02eba1473e6933495500624a55d6357cb99ddfdfec1549ea9f6.jpg b/data/2025/2504_10xxx/2504.10342/images/7461112c409fd02eba1473e6933495500624a55d6357cb99ddfdfec1549ea9f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a611d6e88372f5181a27bfb5a2e16d3139ecd45c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/7461112c409fd02eba1473e6933495500624a55d6357cb99ddfdfec1549ea9f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7753f7ed17e8eb3673d368027236d1b44ed4e273896b72b683a762b1991b1293 +size 3345 diff --git a/data/2025/2504_10xxx/2504.10342/images/78312d3960e121d282f214dae295f027c4d2a797ed5d6d36b898b32f29426c95.jpg b/data/2025/2504_10xxx/2504.10342/images/78312d3960e121d282f214dae295f027c4d2a797ed5d6d36b898b32f29426c95.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f5f62e632e7ac97627e3863f094bfb09eaa4954 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/78312d3960e121d282f214dae295f027c4d2a797ed5d6d36b898b32f29426c95.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09221f15665f06193ec06f273522d94981928f5cb3379c4a4a7476d5599770e3 +size 18678 diff --git a/data/2025/2504_10xxx/2504.10342/images/783d6eab2c018327ef49d4996b35460aeb2b724d7e4d20b7f7afa306007e0475.jpg b/data/2025/2504_10xxx/2504.10342/images/783d6eab2c018327ef49d4996b35460aeb2b724d7e4d20b7f7afa306007e0475.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31b1dbc38568b465e55206f768ef58e1e26b81de --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/783d6eab2c018327ef49d4996b35460aeb2b724d7e4d20b7f7afa306007e0475.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a526e9cd05d19174c9769980cd1fb4dc253a93bc712014da7b42e05056a5d34 +size 1121 diff --git a/data/2025/2504_10xxx/2504.10342/images/784558a7f58d3124ed539611b33a54af63298c5695a8d03677d909c7112a73fd.jpg b/data/2025/2504_10xxx/2504.10342/images/784558a7f58d3124ed539611b33a54af63298c5695a8d03677d909c7112a73fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a48bf04956ecb0e21b50f04f62be93980736ac60 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/784558a7f58d3124ed539611b33a54af63298c5695a8d03677d909c7112a73fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:391644f97e50c09b5de6cbcc58c6fa1b4c2f7340d849f2055bf830bf678d7514 +size 3384 diff --git a/data/2025/2504_10xxx/2504.10342/images/7e313e170b470582b9ae9c2a7973d28c3e99cdd5aa3de4c0ceb96af05e72e7c3.jpg b/data/2025/2504_10xxx/2504.10342/images/7e313e170b470582b9ae9c2a7973d28c3e99cdd5aa3de4c0ceb96af05e72e7c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..235cc349222febe5112c0dd5c60d2655672fe2c0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/7e313e170b470582b9ae9c2a7973d28c3e99cdd5aa3de4c0ceb96af05e72e7c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95c2a58f7381cfd8319d9b50032f4ab953e3743fe3583605457273bcaebbd3f6 +size 8765 diff --git a/data/2025/2504_10xxx/2504.10342/images/7ee0bf8793fb3d09c02768e277095ab8284e9064c0e822e409949164f9d52fe8.jpg b/data/2025/2504_10xxx/2504.10342/images/7ee0bf8793fb3d09c02768e277095ab8284e9064c0e822e409949164f9d52fe8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b90ed88075cd7a7e143fc79b67c0480e47190dfc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/7ee0bf8793fb3d09c02768e277095ab8284e9064c0e822e409949164f9d52fe8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cd4eaea934a48f643e209f2ce376549aa6c973f3c38b15b779a036fbe6deafb +size 2710 diff --git a/data/2025/2504_10xxx/2504.10342/images/7f678c3abec399cf19f625891724b356cd35e119f8affb3fdaca193802247dc2.jpg b/data/2025/2504_10xxx/2504.10342/images/7f678c3abec399cf19f625891724b356cd35e119f8affb3fdaca193802247dc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fbe78918e583c1293ec0949f6793c35683373e25 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/7f678c3abec399cf19f625891724b356cd35e119f8affb3fdaca193802247dc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5243fc2195a3b033f98464e2e0810ecee71c4ef453d874764ea74cc7b6debc1 +size 4807 diff --git a/data/2025/2504_10xxx/2504.10342/images/80da2aa740657911acc5a646847ccea63c96b896cfdddfe6c9198ef82424b766.jpg b/data/2025/2504_10xxx/2504.10342/images/80da2aa740657911acc5a646847ccea63c96b896cfdddfe6c9198ef82424b766.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/80da2aa740657911acc5a646847ccea63c96b896cfdddfe6c9198ef82424b766.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/833d68dbfe0935e410c0edebe6b9a388a975d4f0df03af384d4597f191c08d49.jpg 
b/data/2025/2504_10xxx/2504.10342/images/833d68dbfe0935e410c0edebe6b9a388a975d4f0df03af384d4597f191c08d49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6075f1baed6f03d6d25b324be06f5cd6ada4db56 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/833d68dbfe0935e410c0edebe6b9a388a975d4f0df03af384d4597f191c08d49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79431a2c26363185ad51a7b05a28d5b90c2a4d1614541e443d1d83e96bbd81b4 +size 1702 diff --git a/data/2025/2504_10xxx/2504.10342/images/85d2f65accb24a4a7760e618083811214a75b10243babb7aedceef3ce06aab19.jpg b/data/2025/2504_10xxx/2504.10342/images/85d2f65accb24a4a7760e618083811214a75b10243babb7aedceef3ce06aab19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d417d936d26743142537bb2818e31c0cfb37718 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/85d2f65accb24a4a7760e618083811214a75b10243babb7aedceef3ce06aab19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4dd1144f1c74953873017ffd311d08c599308a18d41f0b3efc8fef95d982199 +size 6472 diff --git a/data/2025/2504_10xxx/2504.10342/images/85fc0acebdc041898bc46ceeeb24682fb59f7759acc9e281093e3a19ec0fdb83.jpg b/data/2025/2504_10xxx/2504.10342/images/85fc0acebdc041898bc46ceeeb24682fb59f7759acc9e281093e3a19ec0fdb83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9642c6ea850c348c0a91c08ab45afea90eca1190 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/85fc0acebdc041898bc46ceeeb24682fb59f7759acc9e281093e3a19ec0fdb83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:090c578d804d9ef3875646ff279e9d510f7ad709c68da819018c84cb8f376e47 +size 4865 diff --git a/data/2025/2504_10xxx/2504.10342/images/88905e754aef245617dad71ab60cf29cbaa7de0c2cdc7bfa6246797edc486a4a.jpg b/data/2025/2504_10xxx/2504.10342/images/88905e754aef245617dad71ab60cf29cbaa7de0c2cdc7bfa6246797edc486a4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/88905e754aef245617dad71ab60cf29cbaa7de0c2cdc7bfa6246797edc486a4a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/89b0feaa5116c74ddcd31055f2b342bfdeeb7a6e14a35f8811b249750b39baac.jpg b/data/2025/2504_10xxx/2504.10342/images/89b0feaa5116c74ddcd31055f2b342bfdeeb7a6e14a35f8811b249750b39baac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85483e8cbb62b3f610cf0ce997542a6f7816fafe --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/89b0feaa5116c74ddcd31055f2b342bfdeeb7a6e14a35f8811b249750b39baac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21e38b294fe7a4c1370fbd45c77db65db528017765c6c463cc90126e6d520489 +size 15441 diff --git a/data/2025/2504_10xxx/2504.10342/images/8ac1191f2d067a68d549fd32aad8c30f764923b7f3097b435600a39acd0bbc64.jpg b/data/2025/2504_10xxx/2504.10342/images/8ac1191f2d067a68d549fd32aad8c30f764923b7f3097b435600a39acd0bbc64.jpg new file mode 100644 index 0000000000000000000000000000000000000000..888bc5c93446cbb9625b2757e9b224b0e0a358f7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/8ac1191f2d067a68d549fd32aad8c30f764923b7f3097b435600a39acd0bbc64.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:80bcb334cd5de3069a26de83db1b55787bf71f7b647eb9971bc83526aa83dedd +size 23200 diff --git a/data/2025/2504_10xxx/2504.10342/images/8e7a8efbb9442d3da6e11d1452e1c3d6dfed84fbbc0dca5848c6be76f2f510cd.jpg b/data/2025/2504_10xxx/2504.10342/images/8e7a8efbb9442d3da6e11d1452e1c3d6dfed84fbbc0dca5848c6be76f2f510cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c70650843a202fce60f5e7f32d6323874aa39b10 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/8e7a8efbb9442d3da6e11d1452e1c3d6dfed84fbbc0dca5848c6be76f2f510cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bf91e259e3de028da46456e98ea89fe52d2612c2cbc2b9b7d198222ba5b9435 +size 1796 diff --git a/data/2025/2504_10xxx/2504.10342/images/904965bbf4265f24d2577a52604da12422da4d054e267cebc6d8b1a6743bfb7f.jpg b/data/2025/2504_10xxx/2504.10342/images/904965bbf4265f24d2577a52604da12422da4d054e267cebc6d8b1a6743bfb7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9507424ead1bbb187960594d7ef62b4cb3b4d96f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/904965bbf4265f24d2577a52604da12422da4d054e267cebc6d8b1a6743bfb7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:321c4f3b313006f480e14f610b30e801f54d66e00e03b93f873541da13566644 +size 6215 diff --git a/data/2025/2504_10xxx/2504.10342/images/927d08a0f0f6c9898c84461b46688bcabf55b744090097079b653255cfc913a2.jpg b/data/2025/2504_10xxx/2504.10342/images/927d08a0f0f6c9898c84461b46688bcabf55b744090097079b653255cfc913a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/927d08a0f0f6c9898c84461b46688bcabf55b744090097079b653255cfc913a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/92bbb61a0dc500ca4cc6ff7ae9e4bf125395e05e5543c7fc02bd64ec2111821d.jpg b/data/2025/2504_10xxx/2504.10342/images/92bbb61a0dc500ca4cc6ff7ae9e4bf125395e05e5543c7fc02bd64ec2111821d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a1a9652c1dce858e2028d556fd29df095832d4f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/92bbb61a0dc500ca4cc6ff7ae9e4bf125395e05e5543c7fc02bd64ec2111821d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d581613ba5c718027648d7d7e65ccffbbefbba5d706c16e55e6099742c5f3e9d +size 15403 diff --git a/data/2025/2504_10xxx/2504.10342/images/932cbaf51c205a9c980fb43076c9fd7e28519b492074fde5af65adb7c67653ef.jpg b/data/2025/2504_10xxx/2504.10342/images/932cbaf51c205a9c980fb43076c9fd7e28519b492074fde5af65adb7c67653ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53884f627e51b3192725e91cd877182b5a841032 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/932cbaf51c205a9c980fb43076c9fd7e28519b492074fde5af65adb7c67653ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c51a37bdb760e28f4af1127f9a43a629c617808c1b96b1cfb2fccf3dce05ec9 +size 32337 diff --git a/data/2025/2504_10xxx/2504.10342/images/97e95fdf454706c7da694622cce81f46164cd08513842a89a130211e7210be6f.jpg b/data/2025/2504_10xxx/2504.10342/images/97e95fdf454706c7da694622cce81f46164cd08513842a89a130211e7210be6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d6d5a9c94b119b5576e6e9a7a5c05a05b336c8e --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/97e95fdf454706c7da694622cce81f46164cd08513842a89a130211e7210be6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f25c405a000acda009d6063790a556c705a6561c86ae2b36d4fa0cd3ea99cb5 +size 32277 diff --git a/data/2025/2504_10xxx/2504.10342/images/97f4b7976903cd9ad74954a9240e4fb1d04f429d28e0ec5595ed2893a243218d.jpg b/data/2025/2504_10xxx/2504.10342/images/97f4b7976903cd9ad74954a9240e4fb1d04f429d28e0ec5595ed2893a243218d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af1ddbc0c281e0b3fa18116d3e23887e9debf717 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/97f4b7976903cd9ad74954a9240e4fb1d04f429d28e0ec5595ed2893a243218d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db6bb7e148f1f426e2d77179f4f85f17fd253d83eedc2c6d774a43b3d551a58c +size 50974 diff --git a/data/2025/2504_10xxx/2504.10342/images/988fe318a4b806c51614855dc118b5905c5628e9d1a730257062c2d4125fef59.jpg b/data/2025/2504_10xxx/2504.10342/images/988fe318a4b806c51614855dc118b5905c5628e9d1a730257062c2d4125fef59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4dea87040a5e27db34e9391e1c6066e3baed7e08 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/988fe318a4b806c51614855dc118b5905c5628e9d1a730257062c2d4125fef59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6539bf82b56b11a3561371c5f387f9924a50216e5a0f088327631438c6332f09 +size 1939 diff --git a/data/2025/2504_10xxx/2504.10342/images/999904e34e7035b4b23684e5a2a42c123a5cad6b8ceb3076290e805a48da6d9f.jpg b/data/2025/2504_10xxx/2504.10342/images/999904e34e7035b4b23684e5a2a42c123a5cad6b8ceb3076290e805a48da6d9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85aa143026ccab52d8b51156fd74697a2d1eb300 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/999904e34e7035b4b23684e5a2a42c123a5cad6b8ceb3076290e805a48da6d9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba9c8752857897c0c6710e01840adfc683da0a9480833efe91eea217eac33f9a +size 41869 diff --git a/data/2025/2504_10xxx/2504.10342/images/9b0cd4b7d45ea96e77e1a76fa635f68d59efca6ae415ddcea6116702d2a08391.jpg b/data/2025/2504_10xxx/2504.10342/images/9b0cd4b7d45ea96e77e1a76fa635f68d59efca6ae415ddcea6116702d2a08391.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b979f48a2e20ef75f08990259ce5f20222d808d3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/9b0cd4b7d45ea96e77e1a76fa635f68d59efca6ae415ddcea6116702d2a08391.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e18260feb24bd4245241c25107096d4bee05df2bd0909c219dfad4ce978d513e +size 9004 diff --git a/data/2025/2504_10xxx/2504.10342/images/9b686a95436f9a02fa717ed03cad01e1e16f1c7cc446ebdb14615dd0b806dd7a.jpg b/data/2025/2504_10xxx/2504.10342/images/9b686a95436f9a02fa717ed03cad01e1e16f1c7cc446ebdb14615dd0b806dd7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a33fd28f6491372fcca58988914f8974a3c361e8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/9b686a95436f9a02fa717ed03cad01e1e16f1c7cc446ebdb14615dd0b806dd7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1c73e6abe40b981564f3afc71d52f213f40a8ae56d1faa59622c87b2f0ed424 +size 746 diff --git a/data/2025/2504_10xxx/2504.10342/images/9be1b71d51b868e947ea438ebbee99b5295830e1be6a90035757235c7e3f403e.jpg 
b/data/2025/2504_10xxx/2504.10342/images/9be1b71d51b868e947ea438ebbee99b5295830e1be6a90035757235c7e3f403e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..829e7387b373a58f7df43efb64fbea9a16e399d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/9be1b71d51b868e947ea438ebbee99b5295830e1be6a90035757235c7e3f403e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3a54d5db363f3b3a9ddf18ce43190dc02589a571931405de0495de976ac3a4d +size 17439 diff --git a/data/2025/2504_10xxx/2504.10342/images/9d2390fe3c1b50f4c9e0c3fc610d172eb6b346f542c089a5ccbaa752243e3aff.jpg b/data/2025/2504_10xxx/2504.10342/images/9d2390fe3c1b50f4c9e0c3fc610d172eb6b346f542c089a5ccbaa752243e3aff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16e13d2a8ce119fb7856b4266f87f63938c33a87 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/9d2390fe3c1b50f4c9e0c3fc610d172eb6b346f542c089a5ccbaa752243e3aff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65c2a7be5b64c79c1c5555160d5dc13df00446202f283afac17621edb6630142 +size 16604 diff --git a/data/2025/2504_10xxx/2504.10342/images/a04ebb8a43502409b5fd77351daf18ccb85f6f642ae25c7224e556230afbcbc6.jpg b/data/2025/2504_10xxx/2504.10342/images/a04ebb8a43502409b5fd77351daf18ccb85f6f642ae25c7224e556230afbcbc6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31b1dbc38568b465e55206f768ef58e1e26b81de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/a04ebb8a43502409b5fd77351daf18ccb85f6f642ae25c7224e556230afbcbc6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a526e9cd05d19174c9769980cd1fb4dc253a93bc712014da7b42e05056a5d34 +size 1121 diff --git a/data/2025/2504_10xxx/2504.10342/images/a60ad6a9cc833737461a840efa4f6a01f8ba6ccbb0a93d01b817ef50dc45e361.jpg b/data/2025/2504_10xxx/2504.10342/images/a60ad6a9cc833737461a840efa4f6a01f8ba6ccbb0a93d01b817ef50dc45e361.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f95c311e601bf13d82dfd60d3c068c42689a848 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/a60ad6a9cc833737461a840efa4f6a01f8ba6ccbb0a93d01b817ef50dc45e361.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:851e0973951123f87fd1d8e98bcf1a86133acf1c8068ac8585e918c58886d645 +size 3278 diff --git a/data/2025/2504_10xxx/2504.10342/images/a77154c58642491a23c268c4034f10ad4af28ba62a0d416a6bd8022d2dd274b2.jpg b/data/2025/2504_10xxx/2504.10342/images/a77154c58642491a23c268c4034f10ad4af28ba62a0d416a6bd8022d2dd274b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c12c8ee43a5d8c6c77839b3b747a61356a9cb99 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/a77154c58642491a23c268c4034f10ad4af28ba62a0d416a6bd8022d2dd274b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57d111d39f479d13fadc4bb217aaab162b81571a34360d674d1ec80a7341574c +size 1240 diff --git a/data/2025/2504_10xxx/2504.10342/images/aab8e64fbb9a68e5fd7bb86a766ebaf859645b89ced9f8bad955ead33c881c8a.jpg b/data/2025/2504_10xxx/2504.10342/images/aab8e64fbb9a68e5fd7bb86a766ebaf859645b89ced9f8bad955ead33c881c8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a672865f3801bbd6d47769b5e5eae590280048d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/aab8e64fbb9a68e5fd7bb86a766ebaf859645b89ced9f8bad955ead33c881c8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c9b67a007663d8733d66e94b5f5db5a27a62ba31dc0e92a9e07a22cebac0306f +size 9148 diff --git a/data/2025/2504_10xxx/2504.10342/images/ab47e0324a8f3696477f82c394854dd355acec65de459490a80c26dc69f8f9ff.jpg b/data/2025/2504_10xxx/2504.10342/images/ab47e0324a8f3696477f82c394854dd355acec65de459490a80c26dc69f8f9ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b7bd24ee806c63679004952396993e7be44cbd6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ab47e0324a8f3696477f82c394854dd355acec65de459490a80c26dc69f8f9ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd3b9eca9f8056dbc402aa82d5d3c01261acc8cf64fb2cefecd1fa5306352645 +size 4310 diff --git a/data/2025/2504_10xxx/2504.10342/images/ad4b61a98884381daa9f1303c0f51f059c4fb92172fa528912856cb90462826f.jpg b/data/2025/2504_10xxx/2504.10342/images/ad4b61a98884381daa9f1303c0f51f059c4fb92172fa528912856cb90462826f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2449e942a55c3bf1fc401ba61b39ea4c225b8cb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ad4b61a98884381daa9f1303c0f51f059c4fb92172fa528912856cb90462826f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00dba0b153e5972d137c93c62c9c18936a8ae9029c2625c6abb20ce2e8363f24 +size 3914 diff --git a/data/2025/2504_10xxx/2504.10342/images/ad7496557f0542675336a310ca2059dadf3a471b7b68213fd05a31105358d979.jpg b/data/2025/2504_10xxx/2504.10342/images/ad7496557f0542675336a310ca2059dadf3a471b7b68213fd05a31105358d979.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bfc1c5485043180c69eb1ffbe29c7df5f48065d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ad7496557f0542675336a310ca2059dadf3a471b7b68213fd05a31105358d979.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cb1293e31ec072e12bd43f43d5f805ef57a451f678de133f3fe283fba2e7a3f +size 1994 diff --git a/data/2025/2504_10xxx/2504.10342/images/adf5076b567b494c09c346d232e7291ede55ef3f81babce364269d00e4cdb522.jpg b/data/2025/2504_10xxx/2504.10342/images/adf5076b567b494c09c346d232e7291ede55ef3f81babce364269d00e4cdb522.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c9b9e7f2c474a6c15c5661643d2d5037be983c3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/adf5076b567b494c09c346d232e7291ede55ef3f81babce364269d00e4cdb522.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfeba367fab5c686884d666d9e92d4f3266d65ae12647eaa3bd299d1061b7185 +size 726 diff --git a/data/2025/2504_10xxx/2504.10342/images/afd0faec9feb947504d44ca25613ade5faa37fee1a4f3a557b8ced9f2809e7ed.jpg b/data/2025/2504_10xxx/2504.10342/images/afd0faec9feb947504d44ca25613ade5faa37fee1a4f3a557b8ced9f2809e7ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/afd0faec9feb947504d44ca25613ade5faa37fee1a4f3a557b8ced9f2809e7ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/b175172cabef3457d3fbf51009db02477dbf908a85f4f7569d8011b8942a5875.jpg b/data/2025/2504_10xxx/2504.10342/images/b175172cabef3457d3fbf51009db02477dbf908a85f4f7569d8011b8942a5875.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c70d2fc9a93ada8f151309e12c69c7c372d84bb9 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/b175172cabef3457d3fbf51009db02477dbf908a85f4f7569d8011b8942a5875.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89ad1d0a3a3f816124f9da024a9144ef435dfb15beba501bf7e9c3b4befaab8f +size 1143 diff --git a/data/2025/2504_10xxx/2504.10342/images/b3960c57aa37463f6a6aae7ee509937f01607f72a797a14974a5afe562bad84c.jpg b/data/2025/2504_10xxx/2504.10342/images/b3960c57aa37463f6a6aae7ee509937f01607f72a797a14974a5afe562bad84c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f59ed75938a0cb1ee991ad1ccca0eec9177e4e25 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b3960c57aa37463f6a6aae7ee509937f01607f72a797a14974a5afe562bad84c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71427d247d0fb9925fc81d0ab18fb76d5736e12e773e7d94d8b875958e3eb6dd +size 3524 diff --git a/data/2025/2504_10xxx/2504.10342/images/b3a81caaab7e6d1a6308effe863bcf676eca4ca870e4b241ebb9c196a80297ae.jpg b/data/2025/2504_10xxx/2504.10342/images/b3a81caaab7e6d1a6308effe863bcf676eca4ca870e4b241ebb9c196a80297ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8cbf4e6efd91761e4c2eaefc569d58ab73bd5da5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b3a81caaab7e6d1a6308effe863bcf676eca4ca870e4b241ebb9c196a80297ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43b78a86633ec0a726bee02dc38a60059a8df5ca396a187b806cdf06cf90287d +size 4925 diff --git a/data/2025/2504_10xxx/2504.10342/images/b3f117d6f51d23f162333ef36f7641cdcf69a8d721c3509ea76381b786e355f5.jpg b/data/2025/2504_10xxx/2504.10342/images/b3f117d6f51d23f162333ef36f7641cdcf69a8d721c3509ea76381b786e355f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f053de535ce2f6d60e0693c7cce4df266cb5793 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b3f117d6f51d23f162333ef36f7641cdcf69a8d721c3509ea76381b786e355f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de2f218bea46871b65a995c41e418698968fa6510304d2f205e7b455c6976b21 +size 17286 diff --git a/data/2025/2504_10xxx/2504.10342/images/b49fbe7e0699eadfaf858d3fd9a5d59db1d54bb1ba3708e26d31d65d8c714c0e.jpg b/data/2025/2504_10xxx/2504.10342/images/b49fbe7e0699eadfaf858d3fd9a5d59db1d54bb1ba3708e26d31d65d8c714c0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d54af9402a2f37453590bb225e890d543ef82b0f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b49fbe7e0699eadfaf858d3fd9a5d59db1d54bb1ba3708e26d31d65d8c714c0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0ae6bdffa154e303419255f716bafe9ede358fe76a47e83b7687058403f2adb +size 56502 diff --git a/data/2025/2504_10xxx/2504.10342/images/b536792942d0b01abaf60cfa156d36a936bb060f4df1b04a5055134b1d998792.jpg b/data/2025/2504_10xxx/2504.10342/images/b536792942d0b01abaf60cfa156d36a936bb060f4df1b04a5055134b1d998792.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e2d34fe7ffd28746899ebb47236ba8e1a02b211 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b536792942d0b01abaf60cfa156d36a936bb060f4df1b04a5055134b1d998792.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e3f5798b191da85146c90a221e3446265649fbfdc1157a49a279c9bca3937d7 +size 19405 diff --git a/data/2025/2504_10xxx/2504.10342/images/b762364e4fbdadb57bcfa6d9c604c9302b6522c57bd98dcd1c60d88d2028f45b.jpg 
b/data/2025/2504_10xxx/2504.10342/images/b762364e4fbdadb57bcfa6d9c604c9302b6522c57bd98dcd1c60d88d2028f45b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0a721d86d42673fa690f5bd1e2778fbd1087d57 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b762364e4fbdadb57bcfa6d9c604c9302b6522c57bd98dcd1c60d88d2028f45b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b936f3fb4d3c2fd862f204dfa371daf2440736273eeb79f7deea75bc3b71eebc +size 1913 diff --git a/data/2025/2504_10xxx/2504.10342/images/b7644b24bb6cc17803106504397075a33326ffdc8d6e7a7d2ceb81edbe695538.jpg b/data/2025/2504_10xxx/2504.10342/images/b7644b24bb6cc17803106504397075a33326ffdc8d6e7a7d2ceb81edbe695538.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfb267a102281b6f77dac163a33b1f7663bfb388 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b7644b24bb6cc17803106504397075a33326ffdc8d6e7a7d2ceb81edbe695538.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00ef21f5c859701fac9cb6d78e446dcae6efc87a36790012d1122a14c6eeb0d3 +size 1943 diff --git a/data/2025/2504_10xxx/2504.10342/images/b7d7d1a6defad214c382f124a73a41a9d9160dcaaf744d712a10ca291058ec55.jpg b/data/2025/2504_10xxx/2504.10342/images/b7d7d1a6defad214c382f124a73a41a9d9160dcaaf744d712a10ca291058ec55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1639e1e5dce3408e1d6177878f2a6c98221cb5cd --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b7d7d1a6defad214c382f124a73a41a9d9160dcaaf744d712a10ca291058ec55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cc7ab8c838f1685c7f6c1023bdef6e4a62a4895a34d400a2d0698839c280661 +size 3228 diff --git a/data/2025/2504_10xxx/2504.10342/images/b8c7c0234db97446c1b2e4d704709d806fe90a0242a41fc9f438630674fb830f.jpg b/data/2025/2504_10xxx/2504.10342/images/b8c7c0234db97446c1b2e4d704709d806fe90a0242a41fc9f438630674fb830f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df76bdb3131e74821b214527b63a016aa469d84e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b8c7c0234db97446c1b2e4d704709d806fe90a0242a41fc9f438630674fb830f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1212ce47e97c7700fa9b832177a630079b934dc60592587e875fe6af302a62c +size 4770 diff --git a/data/2025/2504_10xxx/2504.10342/images/b93bf93c475a4531b9fbb26b1242800aad6dc72e38106580e5a55971721ed1bd.jpg b/data/2025/2504_10xxx/2504.10342/images/b93bf93c475a4531b9fbb26b1242800aad6dc72e38106580e5a55971721ed1bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16bbbdfa308ab98862e5f4d325eee15068f8406c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b93bf93c475a4531b9fbb26b1242800aad6dc72e38106580e5a55971721ed1bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:284a59d8f8c558b109fdaa5b85a573afbe92175e41cf7c2d7902bbe5783ad2aa +size 2443 diff --git a/data/2025/2504_10xxx/2504.10342/images/b9bd0cb26aec3f688bb02c915e6e3c6a947257aafe514b16d19b7342827dc8a7.jpg b/data/2025/2504_10xxx/2504.10342/images/b9bd0cb26aec3f688bb02c915e6e3c6a947257aafe514b16d19b7342827dc8a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74fad415006372c61ebac6e2720e9312909c54d5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/b9bd0cb26aec3f688bb02c915e6e3c6a947257aafe514b16d19b7342827dc8a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:504e6691d55ba3228efe023769cafcc964371cee8669254b7dffe61011e2bef6 +size 1163 diff --git a/data/2025/2504_10xxx/2504.10342/images/bea696fa24270a2378f3ea6791555d4d9e3504dd156e7e882d77f1f7fd86dcd8.jpg b/data/2025/2504_10xxx/2504.10342/images/bea696fa24270a2378f3ea6791555d4d9e3504dd156e7e882d77f1f7fd86dcd8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82018da0878a130e0f661a3ae851ec9bc71c18a6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/bea696fa24270a2378f3ea6791555d4d9e3504dd156e7e882d77f1f7fd86dcd8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f5f59dd971033fc9f076506a6062781efe81e0fd920951d4eceee9ab482f15e +size 725 diff --git a/data/2025/2504_10xxx/2504.10342/images/bf86655e1940e3c1363d3ce9ef3b6abf9cd477dffada06d559f29bcafaa6f316.jpg b/data/2025/2504_10xxx/2504.10342/images/bf86655e1940e3c1363d3ce9ef3b6abf9cd477dffada06d559f29bcafaa6f316.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87ff282bff4e3c8ee9ffff60e113df1dc4d33fc2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/bf86655e1940e3c1363d3ce9ef3b6abf9cd477dffada06d559f29bcafaa6f316.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffd97b042af2c4c2f6221ddf662265e4aa23d66e0b7bec8396819b585f204bd7 +size 36230 diff --git a/data/2025/2504_10xxx/2504.10342/images/c026a961938ed94885c911fa25839173a82101bc93bfae62d8fee252741b6a94.jpg b/data/2025/2504_10xxx/2504.10342/images/c026a961938ed94885c911fa25839173a82101bc93bfae62d8fee252741b6a94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/c026a961938ed94885c911fa25839173a82101bc93bfae62d8fee252741b6a94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/c5e2c4db744c3657fe1cb4531ced0aa2d78982d0bb5fcdc58270986c6c22c141.jpg b/data/2025/2504_10xxx/2504.10342/images/c5e2c4db744c3657fe1cb4531ced0aa2d78982d0bb5fcdc58270986c6c22c141.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e6086289a14a9b96c39935ffe0a55ff9f3f31f2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/c5e2c4db744c3657fe1cb4531ced0aa2d78982d0bb5fcdc58270986c6c22c141.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0506f4e28f1f77ab6ca1dd87fe41f80cefe7612b8ee3ea452dccb3b53bf94bf +size 27401 diff --git a/data/2025/2504_10xxx/2504.10342/images/c6fc1a83d6a9bc93b8784359ac076f5ca84d132570716648ebe7672867c6bd45.jpg b/data/2025/2504_10xxx/2504.10342/images/c6fc1a83d6a9bc93b8784359ac076f5ca84d132570716648ebe7672867c6bd45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8891fa9fae9a0af4286f8537073c7a49516d1396 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/c6fc1a83d6a9bc93b8784359ac076f5ca84d132570716648ebe7672867c6bd45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de192e5c1ecbb01d927de6c9527dcab4ebf6538e77c80101743a57d8632cad19 +size 1140 diff --git a/data/2025/2504_10xxx/2504.10342/images/c86427df64f338c67429e5c9fd24fa20e14af7ab99cc71ca9cd42b04000c044d.jpg b/data/2025/2504_10xxx/2504.10342/images/c86427df64f338c67429e5c9fd24fa20e14af7ab99cc71ca9cd42b04000c044d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..becd4ef9df41f67415b6818a12143579c4e10def --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/c86427df64f338c67429e5c9fd24fa20e14af7ab99cc71ca9cd42b04000c044d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de680f92c4bc7ac20fb667a05a74706021ff7765394fe4e305a5d5af4410cdcd +size 146290 diff --git a/data/2025/2504_10xxx/2504.10342/images/c8fb5f58b9700cf16ecc6cb3575f9894069c4c885cbea362069dec97779eacbd.jpg b/data/2025/2504_10xxx/2504.10342/images/c8fb5f58b9700cf16ecc6cb3575f9894069c4c885cbea362069dec97779eacbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6acbbdf737239c2fd6f78c10ed6bb665ee9e7308 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/c8fb5f58b9700cf16ecc6cb3575f9894069c4c885cbea362069dec97779eacbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3281b089c39522636485ba34e9f50272d301627ccfd22f5463098b575b07894a +size 2149 diff --git a/data/2025/2504_10xxx/2504.10342/images/c92374dde78af9c0322744c19117f204b609d167b4de3d59228ed153f09a6a03.jpg b/data/2025/2504_10xxx/2504.10342/images/c92374dde78af9c0322744c19117f204b609d167b4de3d59228ed153f09a6a03.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30333fd6511bb5b877f78a7fb5ab05c6cb2b309d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/c92374dde78af9c0322744c19117f204b609d167b4de3d59228ed153f09a6a03.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77f36b160e84c874046c6f6272bdf12acd4107846e2b19f0d5e03ddd4aa12419 +size 3635 diff --git a/data/2025/2504_10xxx/2504.10342/images/cb35d65666a6596b675ccdc24c9b37f588de9281849111d31cf59dc8561dd8a1.jpg b/data/2025/2504_10xxx/2504.10342/images/cb35d65666a6596b675ccdc24c9b37f588de9281849111d31cf59dc8561dd8a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da2b8ad0846ef40bb7093bf799c06d397f3ab354 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/cb35d65666a6596b675ccdc24c9b37f588de9281849111d31cf59dc8561dd8a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d38a9105f4cb1c9fff09cc3a157cb7032bddae6c0ec72403dc685a3fee0f830a +size 1253 diff --git a/data/2025/2504_10xxx/2504.10342/images/cbd2f7e3140808a83ca69a0a2e1610dd9a654841f60dae20b2433020e2adb7e1.jpg b/data/2025/2504_10xxx/2504.10342/images/cbd2f7e3140808a83ca69a0a2e1610dd9a654841f60dae20b2433020e2adb7e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31b1dbc38568b465e55206f768ef58e1e26b81de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/cbd2f7e3140808a83ca69a0a2e1610dd9a654841f60dae20b2433020e2adb7e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a526e9cd05d19174c9769980cd1fb4dc253a93bc712014da7b42e05056a5d34 +size 1121 diff --git a/data/2025/2504_10xxx/2504.10342/images/ce6eca9bc80c76f6a1977c79291f1241734efb0200616f04e4ea89031045eeaf.jpg b/data/2025/2504_10xxx/2504.10342/images/ce6eca9bc80c76f6a1977c79291f1241734efb0200616f04e4ea89031045eeaf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd1907577b4bd8a0af133a6af677b47e2bf187aa --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ce6eca9bc80c76f6a1977c79291f1241734efb0200616f04e4ea89031045eeaf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa4a14ebc714865acfaeae8fd42e1b9338a3726207e819431ba872e085bdbb4f +size 136053 diff --git a/data/2025/2504_10xxx/2504.10342/images/cefa652b64bca2cb3ded5a974bd666119b76399473475757d6e80f5058d67cca.jpg 
b/data/2025/2504_10xxx/2504.10342/images/cefa652b64bca2cb3ded5a974bd666119b76399473475757d6e80f5058d67cca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c5748cc57d737111c357eb419c904d54df370b3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/cefa652b64bca2cb3ded5a974bd666119b76399473475757d6e80f5058d67cca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663479c308694c05b47ec8cf56965eb1d442f18c180876af4f84ccfb49afa386 +size 1110 diff --git a/data/2025/2504_10xxx/2504.10342/images/d16e3f610ac02e622ac34890433e4efd1957e013f904088e7fb2cd385a5816c5.jpg b/data/2025/2504_10xxx/2504.10342/images/d16e3f610ac02e622ac34890433e4efd1957e013f904088e7fb2cd385a5816c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a0f203227bbc9d3cede30df5fce085a1cefb4fc --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/d16e3f610ac02e622ac34890433e4efd1957e013f904088e7fb2cd385a5816c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45fff9c11dd45909f3bff4e6772f22f1738bb77a88b2d1d04b4798bbb78b933a +size 1223 diff --git a/data/2025/2504_10xxx/2504.10342/images/d20318dee2d84b8a10a978ba5d30a8fbe102063bc9651d8ef76d1f77c1d7ef91.jpg b/data/2025/2504_10xxx/2504.10342/images/d20318dee2d84b8a10a978ba5d30a8fbe102063bc9651d8ef76d1f77c1d7ef91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..994a5affe598705828953dae9559620979222bce --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/d20318dee2d84b8a10a978ba5d30a8fbe102063bc9651d8ef76d1f77c1d7ef91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cef8ae59bb07d66572d1fa55dc209f2bdb63246508b08a180d84c4d1c79d6f3 +size 3870 diff --git a/data/2025/2504_10xxx/2504.10342/images/d6fcb4af5124da5e912611d8cc6c3d5089127f9bcd51a49bbc48a62769953913.jpg b/data/2025/2504_10xxx/2504.10342/images/d6fcb4af5124da5e912611d8cc6c3d5089127f9bcd51a49bbc48a62769953913.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e62e51c452c3c266f592e6ab83c3d77e75c4438f --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/d6fcb4af5124da5e912611d8cc6c3d5089127f9bcd51a49bbc48a62769953913.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2edc68a2ce0a86d07cedae287c6cef44543b67bb547cbb634b9ff8445430a086 +size 28518 diff --git a/data/2025/2504_10xxx/2504.10342/images/d7598ff388d40ce2b7daff4e08614c80aa41b7b49ae51fea453b622c3c9556ad.jpg b/data/2025/2504_10xxx/2504.10342/images/d7598ff388d40ce2b7daff4e08614c80aa41b7b49ae51fea453b622c3c9556ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d64babeafb4f18dd6c2f881965aa5107858de0d0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/d7598ff388d40ce2b7daff4e08614c80aa41b7b49ae51fea453b622c3c9556ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acdb1926dfc9ceee60422e5f97b21bb804f6eabfa41610a601d5de373cfc07df +size 2709 diff --git a/data/2025/2504_10xxx/2504.10342/images/d90d53e47cf6e5230fb7578325d85a11aeb477745ec81d8999ae9db553e82ac7.jpg b/data/2025/2504_10xxx/2504.10342/images/d90d53e47cf6e5230fb7578325d85a11aeb477745ec81d8999ae9db553e82ac7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..663573588b08c1e7dd22e6c5c7e4c735f9b871d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/d90d53e47cf6e5230fb7578325d85a11aeb477745ec81d8999ae9db553e82ac7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ce50bf9778d12d7d8dd42cc632bdd71ba49511a0dbf75135e25e4c1b2c5ffee3 +size 10179 diff --git a/data/2025/2504_10xxx/2504.10342/images/d96d394318067b904a712079a7165288a420ae879a0d88f7529f2c1c573e3d0f.jpg b/data/2025/2504_10xxx/2504.10342/images/d96d394318067b904a712079a7165288a420ae879a0d88f7529f2c1c573e3d0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3ac80cc81dd5fe088c166ddd75fb992eacde181 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/d96d394318067b904a712079a7165288a420ae879a0d88f7529f2c1c573e3d0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a613ffd98c73a1eaeb9312308cfcf99244bf589a0cd2041a56d7fe4af07f63d +size 1609 diff --git a/data/2025/2504_10xxx/2504.10342/images/dabc692ef1c0c1082feaaf1e2462b37ee282600a2506e8c89411d6c3df9a1439.jpg b/data/2025/2504_10xxx/2504.10342/images/dabc692ef1c0c1082feaaf1e2462b37ee282600a2506e8c89411d6c3df9a1439.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fd995e6c6a807921ea886709164c73117e3e96d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/dabc692ef1c0c1082feaaf1e2462b37ee282600a2506e8c89411d6c3df9a1439.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88ec2cff292a4434ff28fcd4ff7ea75036b1e4fd7bbac12966c0d2ff0b864855 +size 2191 diff --git a/data/2025/2504_10xxx/2504.10342/images/dc9f11d9c6ca0344d00277395624f99e2b3066e6ca555339dae6e6a396995963.jpg b/data/2025/2504_10xxx/2504.10342/images/dc9f11d9c6ca0344d00277395624f99e2b3066e6ca555339dae6e6a396995963.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46a5d11007ca9f8bc198e1228e086ea6b4a4188c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/dc9f11d9c6ca0344d00277395624f99e2b3066e6ca555339dae6e6a396995963.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c35228f21e0044499ed8652095140eac2ee6bc3379e114467688b92546f76f5f +size 27060 diff --git a/data/2025/2504_10xxx/2504.10342/images/dd2c3eda1142f57bd67e35d01e2305f06caa0abe215815db4166a95aaa2a731c.jpg b/data/2025/2504_10xxx/2504.10342/images/dd2c3eda1142f57bd67e35d01e2305f06caa0abe215815db4166a95aaa2a731c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e12fd0298ce993cd41dfaf034df0fac939a9e88 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/dd2c3eda1142f57bd67e35d01e2305f06caa0abe215815db4166a95aaa2a731c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e875b3334adbf5ab6b940f53cbce585dc6df03951d0e0a98a92f36cd4627b9e +size 20742 diff --git a/data/2025/2504_10xxx/2504.10342/images/df192506b0a870a0c7543522cc6648f7a5cebc30bdfbff972c7e7ef764491a13.jpg b/data/2025/2504_10xxx/2504.10342/images/df192506b0a870a0c7543522cc6648f7a5cebc30bdfbff972c7e7ef764491a13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6218a6d275ec0509ccfa9e9642d5f48fc2458c3b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/df192506b0a870a0c7543522cc6648f7a5cebc30bdfbff972c7e7ef764491a13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79cc992757f403ad556b976487cc0d76b3f1344681ad20fd2e925cf67624f55e +size 1169 diff --git a/data/2025/2504_10xxx/2504.10342/images/e07d22a4f5fa74094ba8126af08a759927de508d0089c6af0dc1d22ad43d3d84.jpg b/data/2025/2504_10xxx/2504.10342/images/e07d22a4f5fa74094ba8126af08a759927de508d0089c6af0dc1d22ad43d3d84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8179c90acbaf073a028d40e2fa3d6ccd98fd1223 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10342/images/e07d22a4f5fa74094ba8126af08a759927de508d0089c6af0dc1d22ad43d3d84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed2d5e3da0b3b6848eaf657e5d5ab0168673d8935b3dcab7ebff2b44fe68dbfc +size 31948 diff --git a/data/2025/2504_10xxx/2504.10342/images/e39d29d3f3dbe47128d3e18cab78b68ccdeb0175590f89a101b716c36c4cfca3.jpg b/data/2025/2504_10xxx/2504.10342/images/e39d29d3f3dbe47128d3e18cab78b68ccdeb0175590f89a101b716c36c4cfca3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..957ceb50fed989082037b9741b111ae13b4da031 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/e39d29d3f3dbe47128d3e18cab78b68ccdeb0175590f89a101b716c36c4cfca3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ba37c8324571c4ab8ed0b132cd83778ca43830bf710f8cfe431538c6d75b746 +size 20114 diff --git a/data/2025/2504_10xxx/2504.10342/images/e518d4acbe9c161cd715fed617ffb95611fdb16cf38907210ed5d5f595521048.jpg b/data/2025/2504_10xxx/2504.10342/images/e518d4acbe9c161cd715fed617ffb95611fdb16cf38907210ed5d5f595521048.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31b1dbc38568b465e55206f768ef58e1e26b81de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/e518d4acbe9c161cd715fed617ffb95611fdb16cf38907210ed5d5f595521048.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a526e9cd05d19174c9769980cd1fb4dc253a93bc712014da7b42e05056a5d34 +size 1121 diff --git a/data/2025/2504_10xxx/2504.10342/images/e8a8f856fd35aa28d63335409c3b7e168800d1baf1d9d81477bd486902989eda.jpg b/data/2025/2504_10xxx/2504.10342/images/e8a8f856fd35aa28d63335409c3b7e168800d1baf1d9d81477bd486902989eda.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14969505cb08e7252aa4dadeb08669d757ab1c20 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/e8a8f856fd35aa28d63335409c3b7e168800d1baf1d9d81477bd486902989eda.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89402a620005395ebf296f368d982c8a1062523d893f7665ccce43d30b63659d +size 3976 diff --git a/data/2025/2504_10xxx/2504.10342/images/e9bc9fcac4a2371d182b0f22eb739189041818bfaeb2e9d2b079bab6c84fa652.jpg b/data/2025/2504_10xxx/2504.10342/images/e9bc9fcac4a2371d182b0f22eb739189041818bfaeb2e9d2b079bab6c84fa652.jpg new file mode 100644 index 0000000000000000000000000000000000000000..926b84fa6631ee3447ed0f70906d95d126669116 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/e9bc9fcac4a2371d182b0f22eb739189041818bfaeb2e9d2b079bab6c84fa652.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0159a87c7bb06c3acf48d7239ba7ec62190fbecc1faf697feacf67e27a038e56 +size 5145 diff --git a/data/2025/2504_10xxx/2504.10342/images/eb994d835f7c12f06003e9f62bc37518d1ff14925e962e33d938b47b4b9b5b19.jpg b/data/2025/2504_10xxx/2504.10342/images/eb994d835f7c12f06003e9f62bc37518d1ff14925e962e33d938b47b4b9b5b19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94ec88f3b4c207a67b3c03ce020301a3f531aba9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/eb994d835f7c12f06003e9f62bc37518d1ff14925e962e33d938b47b4b9b5b19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0287562f44db24539ee281b9498764615d1a420f63acd911a2922035671d0a7 +size 959 diff --git a/data/2025/2504_10xxx/2504.10342/images/ec18a4ac4c2468c7500f4f846ec923d07b52c062bec7b6f9ea05e65fb8aa82cd.jpg 
b/data/2025/2504_10xxx/2504.10342/images/ec18a4ac4c2468c7500f4f846ec923d07b52c062bec7b6f9ea05e65fb8aa82cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31b1dbc38568b465e55206f768ef58e1e26b81de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ec18a4ac4c2468c7500f4f846ec923d07b52c062bec7b6f9ea05e65fb8aa82cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a526e9cd05d19174c9769980cd1fb4dc253a93bc712014da7b42e05056a5d34 +size 1121 diff --git a/data/2025/2504_10xxx/2504.10342/images/ece4b18303218047608cad7c13202f459437b64ddffa0465d71243302f485618.jpg b/data/2025/2504_10xxx/2504.10342/images/ece4b18303218047608cad7c13202f459437b64ddffa0465d71243302f485618.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d5def7a7230e9f60b0c2146827826c295b81c49 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ece4b18303218047608cad7c13202f459437b64ddffa0465d71243302f485618.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aad451dafb761c16c4d0c18398f51f059ad2a453ac5c51f4c6f1745546e0b08 +size 11967 diff --git a/data/2025/2504_10xxx/2504.10342/images/ecf21b7d9cbd5dd7fe914185b4dd246cda08fcb78323f43b632ec0f01026af01.jpg b/data/2025/2504_10xxx/2504.10342/images/ecf21b7d9cbd5dd7fe914185b4dd246cda08fcb78323f43b632ec0f01026af01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a26a3d608973aa69f45bc3cea73986e4f7807e2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ecf21b7d9cbd5dd7fe914185b4dd246cda08fcb78323f43b632ec0f01026af01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46e06221369df0abd612b058bf30b84d4df442eae64e905102a72697db3962cd +size 1643 diff --git a/data/2025/2504_10xxx/2504.10342/images/ed5578a37fe0c76ed27e20a91c7422833b5c24ad0a429cef3aaf0b4bb114d8e2.jpg b/data/2025/2504_10xxx/2504.10342/images/ed5578a37fe0c76ed27e20a91c7422833b5c24ad0a429cef3aaf0b4bb114d8e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df7d2aaab8cb95d34c196a5f947dd5502d1ba79b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/ed5578a37fe0c76ed27e20a91c7422833b5c24ad0a429cef3aaf0b4bb114d8e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78c114cb04d65e2e8e24260d92590d60372bac9ac3f8ce3ef6aa4ca3ef4ce94a +size 229866 diff --git a/data/2025/2504_10xxx/2504.10342/images/f2b1736102b305edc25f255215e05f78a8d01fa59b7ea9760565a410ccf255c6.jpg b/data/2025/2504_10xxx/2504.10342/images/f2b1736102b305edc25f255215e05f78a8d01fa59b7ea9760565a410ccf255c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a42903fef19246df58498af0257b632b16e25a30 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/f2b1736102b305edc25f255215e05f78a8d01fa59b7ea9760565a410ccf255c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:806aca52913037fe3ee75ab73a2441488be90f324cbcf6e16b115d7a3440ac76 +size 6605 diff --git a/data/2025/2504_10xxx/2504.10342/images/f37da933fc159e721e2936c6b277bfbc2689af3de9753bc8def52900ab628793.jpg b/data/2025/2504_10xxx/2504.10342/images/f37da933fc159e721e2936c6b277bfbc2689af3de9753bc8def52900ab628793.jpg new file mode 100644 index 0000000000000000000000000000000000000000..175262f4d613331890ef598081de6d443f3aeecb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/f37da933fc159e721e2936c6b277bfbc2689af3de9753bc8def52900ab628793.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:cb988ebedc4e0ccc2e9b2059b6d988d0521cb00629e4cc24e3b2fd2a0007a7bd +size 7192 diff --git a/data/2025/2504_10xxx/2504.10342/images/f43ef894cc362d7c0b9717dfbc06a4a9667abb2d5aff08d0cb8cfc765e949aa3.jpg b/data/2025/2504_10xxx/2504.10342/images/f43ef894cc362d7c0b9717dfbc06a4a9667abb2d5aff08d0cb8cfc765e949aa3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbb1f5c38eafb41ef97862e65e626ac2841c7ed9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/f43ef894cc362d7c0b9717dfbc06a4a9667abb2d5aff08d0cb8cfc765e949aa3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:187dbd0571ea5aa40cd7ccd847faa78a1a0c0d7496db4d957009e757d5a350da +size 7286 diff --git a/data/2025/2504_10xxx/2504.10342/images/f9ae1628f4f27c67e42a309bcf565c5199ee7d184e1863bf889224ec5abf6921.jpg b/data/2025/2504_10xxx/2504.10342/images/f9ae1628f4f27c67e42a309bcf565c5199ee7d184e1863bf889224ec5abf6921.jpg new file mode 100644 index 0000000000000000000000000000000000000000..739e669fe1ac3dd16a5a5ea7cda0ae3ee6be45a4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/f9ae1628f4f27c67e42a309bcf565c5199ee7d184e1863bf889224ec5abf6921.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b67bf02c7e11dbb3cf75b90ee2294faaeca93f7f40375dd4c961bba34319d267 +size 2034 diff --git a/data/2025/2504_10xxx/2504.10342/images/fb31c8ae8c6c1b5b7fe9668564add2294271e6b74599c26aa0c6c7eea16db2e7.jpg b/data/2025/2504_10xxx/2504.10342/images/fb31c8ae8c6c1b5b7fe9668564add2294271e6b74599c26aa0c6c7eea16db2e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bb4511158abe20d4013f5a7c1ca052c25aefa6d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/fb31c8ae8c6c1b5b7fe9668564add2294271e6b74599c26aa0c6c7eea16db2e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85b03bab049a9eb7a8cc220b0065460bdf4a6b1e268ac680d9791558bd185a44 +size 2747 diff --git a/data/2025/2504_10xxx/2504.10342/images/fbc5dc8093e16ec24a3470e9d0985c3a334bafdc964c8c86c331e8622178f9d0.jpg b/data/2025/2504_10xxx/2504.10342/images/fbc5dc8093e16ec24a3470e9d0985c3a334bafdc964c8c86c331e8622178f9d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5850edf8863be4ec2daa5110814691ea782dafec --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/fbc5dc8093e16ec24a3470e9d0985c3a334bafdc964c8c86c331e8622178f9d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bb12d67d881402bc2de44125cfdbfe3c645e079613fc14a77d18201a2385bf6 +size 165567 diff --git a/data/2025/2504_10xxx/2504.10342/images/fd296ebae832def4298738b303e6e0ecdc616dbe78e9eb939acb6ded360c9ebd.jpg b/data/2025/2504_10xxx/2504.10342/images/fd296ebae832def4298738b303e6e0ecdc616dbe78e9eb939acb6ded360c9ebd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01c9e083b4a15c86ceee5ffb953195904af2fd73 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/images/fd296ebae832def4298738b303e6e0ecdc616dbe78e9eb939acb6ded360c9ebd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ad7a4bfcf814b74ab26cda340ca032fdda30b3d611329c965ee70703481895f +size 776 diff --git a/data/2025/2504_10xxx/2504.10342/layout.json b/data/2025/2504_10xxx/2504.10342/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..977d4ab30583cbd2f1b5e7710ed891abbb127f18 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10342/layout.json @@ -0,0 +1,79701 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 507, + 113 + ], 
+ "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 507, + 113 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 507, + 113 + ], + "type": "text", + "content": "VISUALPUZZLES: Decoupling Multimodal Reasoning Evaluation from Domain Knowledge" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 132, + 476, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 132, + 476, + 156 + ], + "spans": [ + { + "bbox": [ + 110, + 132, + 476, + 156 + ], + "type": "text", + "content": "Yueqi Song\\*, Tianyue Ou\\*, Yibo Kong†, Zecheng Li†, Graham Neubig, Xiang Yue {yueqis, tianyueo, gneubig, xyue2}@cs.cmu.edu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 241, + 164, + 380, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 164, + 380, + 178 + ], + "spans": [ + { + "bbox": [ + 241, + 164, + 380, + 178 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 206, + 187, + 404, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 187, + 404, + 200 + ], + "spans": [ + { + "bbox": [ + 206, + 187, + 404, + 200 + ], + "type": "text", + "content": "https://neulab.github.io/VisualPuzzles/" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 280, + 211, + 331, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 211, + 331, + 224 + ], + "spans": [ + { + "bbox": [ + 280, + 211, + 331, + 224 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 239, + 471, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 239, + 471, + 495 + ], + "spans": [ + { + "bbox": [ + 140, + 239, + 471, + 495 + ], + "type": "text", + "content": "Current multimodal benchmarks often conflate reasoning with domain-specific knowledge, making it difficult to isolate and evaluate general reasoning abilities in non-expert settings. To address this, we introduce VISUALPUZZLES, a benchmark that targets visual reasoning while deliberately minimizing reliance on specialized knowledge. VISUALPUZZLES consists of diverse questions spanning five categories: algorithmic, analogical, deductive, inductive, and spatial reasoning. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination. Experiments show that VISUALPUZZLES requires significantly less intensive domain-specific knowledge and more complex reasoning compared to benchmarks like MMMU, enabling us to better evaluate genuine multimodal reasoning. Evaluations show that state-of-the-art multimodal large language models consistently lag behind human performance on VISUALPUZZLES, and that strong performance on knowledge-intensive benchmarks does not necessarily translate to success on reasoning-focused, knowledge-light tasks. Additionally, reasoning enhancements such as scaling up inference compute (with \"thinking\" modes) yield inconsistent gains across models and task types, and we observe no clear correlation between model size and performance. We also found that models exhibit different reasoning and answering patterns on VISUALPUZZLES compared to benchmarks with heavier emphasis on knowledge. VISUALPUZZLES offers a clearer lens through which to evaluate reasoning capabilities beyond factual recall and domain knowledge." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 106, + 512, + 504, + 626 + ], + "blocks": [ + { + "bbox": [ + 106, + 512, + 504, + 626 + ], + "lines": [ + { + "bbox": [ + 106, + 512, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 106, + 512, + 504, + 626 + ], + "type": "image", + "image_path": "b49fbe7e0699eadfaf858d3fd9a5d59db1d54bb1ba3708e26d31d65d8c714c0e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 625, + 506, + 694 + ], + "lines": [ + { + "bbox": [ + 104, + 625, + 506, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 506, + 694 + ], + "type": "text", + "content": "Figure 1: Model accuracy on VISUALPUZZLES compared to human performance percentiles. All evaluated models fall below the human 5th percentile (57.5%), highlighting the difficulty of VISUALPUZZLES. Interestingly, models with explicit \"thinking\" modes do not consistently outperform their base versions, suggesting that current reasoning strategies do not yet generalize well to VISUALPUZZLES's scenarios, even though these strategies have proven effective in existing reasoning tasks that often rely heavily on domain-specific knowledge." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 210, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 210, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 210, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.10342v3 [cs.CL] 30 Apr 2025" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 710, + 206, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 206, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 206, + 721 + ], + "type": "text", + "content": "*Equal Contributions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 721, + 206, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 206, + 732 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 206, + 732 + ], + "type": "text", + "content": "Equal Contributions." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 239, + 177 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 239, + 177 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 239, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 239, + 177 + ], + "type": "image", + "image_path": "92bbb61a0dc500ca4cc6ff7ae9e4bf125395e05e5543c7fc02bd64ec2111821d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 240, + 80, + 372, + 176 + ], + "blocks": [ + { + "bbox": [ + 240, + 80, + 372, + 176 + ], + "lines": [ + { + "bbox": [ + 240, + 80, + 372, + 176 + ], + "spans": [ + { + "bbox": [ + 240, + 80, + 372, + 176 + ], + "type": "image", + "image_path": "1ed8ee9704d40204c4a09619e1b5a1f5ac09cbf72757ce1849a071e4e1de96c6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 373, + 80, + 504, + 176 + ], + "blocks": [ + { + "bbox": [ + 373, + 80, + 504, + 176 + ], + "lines": [ + { + "bbox": [ + 373, + 80, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 373, + 80, + 504, + 176 + ], + "type": "image", + "image_path": "78312d3960e121d282f214dae295f027c4d2a797ed5d6d36b898b32f29426c95.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 178, + 304, + 273 + ], + "blocks": [ + { + "bbox": [ + 106, + 178, + 304, + 273 + ], + "lines": [ + { + "bbox": [ + 106, + 178, + 304, + 273 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 304, + 273 + ], + "type": "image", + "image_path": "d6fcb4af5124da5e912611d8cc6c3d5089127f9bcd51a49bbc48a62769953913.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 281, + 477, + 295 + ], + "lines": [ + { + "bbox": [ + 132, + 281, + 477, + 295 + ], + "spans": [ + { + "bbox": [ + 132, + 281, + 477, + 295 + ], + "type": "text", + "content": "Figure 2: Example VISUALPUZZLES instances within each reasoning category" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 178, + 504, + 273 + ], + "blocks": [ + { + "bbox": [ + 304, + 178, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 304, + 178, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 504, + 273 + ], + "type": "image", + "image_path": "bf86655e1940e3c1363d3ce9ef3b6abf9cd477dffada06d559f29bcafaa6f316.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 318, + 196, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 196, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 196, + 331 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 357, + 506, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 435 + ], + "type": "text", + "content": "Reasoning is a cornerstone of both 
human and artificial intelligence, enabling systems to solve problems, draw inferences, and make decisions from information. Recent advances in multimodal large language models (MLLMs) (OpenAI, 2024; Liu et al., 2023a; Li et al., 2024; Dubey et al., 2024; Qwen Team, 2025a; Yue et al., 2025) exhibit early signs of reasoning in tackling complex tasks such as answering expert-level visual questions (Yue et al., 2024a;b), interpreting scientific diagrams (Roberts et al., 2024), and solving challenging math word problems (Lu et al., 2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 440, + 507, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 507, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 507, + 552 + ], + "type": "text", + "content": "Many of the tasks mentioned above are inherently knowledge-intensive; large amounts of knowledge in domains such as science or math are necessary to answer questions correctly (Yue et al., 2024a). However, in reality, reasoning does not necessitate knowledge. Even non-expert humans can successfully solve logic puzzles, spatial reasoning problems, and analogical tasks using general inferential skills, without requiring deep domain expertise. This raises an important question: Can we measure MLLMs's reasoning ability independently of measuring their acquisition of domain-specific knowledge? This question is particularly important with the recent rapid development of reasoning models in the textual domain (Jaech et al., 2024; DeepSeek-AI, 2025; Qwen Team, 2025b), and emerging application to the visual domain (Qwen Team, 2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "type": "text", + "content": "To address this question, we introduce VISUALPUZZLES, a multimodal benchmark explicitly crafted to assess reasoning capabilities independent of specialized knowledge. VISUALPUZZLES comprises 1,168 carefully curated puzzle-like questions that span five distinct categories of reasoning: algorithmic, analogical, deductive, inductive, and spatial, each annotated with varying difficulty levels. VISUALPUZZLES only requires basic common knowledge and the information presented in the question to solve problems, disentangling reasoning from domain-specific knowledge recall. Our experiments show that VISUALPUZZLES requires significantly fewer domain-specific knowledge concepts compared to benchmarks like MMMU, and models have sufficient knowledge required to solve VISUALPUZZLES questions, enabling us to better assess multimodal reasoning versus pretrained factual knowledge. While VISUALPUZZLES minimizes reliance on domain expertise, its reasoning complexity exceeds that of existing benchmarks: in VISUALPUZZLES, " + }, + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "type": "inline_equation", + "content": "82.1\\%" + }, + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "type": "text", + "content": " of models' solution steps are logical reasoning steps, compared to " + }, + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "type": "inline_equation", + "content": "71.5\\%" + }, + { + "bbox": [ + 104, + 555, + 507, + 733 + ], + "type": "text", + "content": " in MMMU. 
Additionally, no current MLLM surpasses even the 5th-percentile human performance, highlighting the benchmark's difficulty and the limitations of today's models in general-purpose visual reasoning." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 118 + ], + "type": "text", + "content": "Our experiments with VISUALPUZZLES reveal critical limitations in current MLLMs' multimodal reasoning ability by factoring out domain-specific knowledge requirements and only focusing on reasoning. Specifically, we uncover four key findings:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 126, + 506, + 315 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 106, + 126, + 505, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 126, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 126, + 505, + 172 + ], + "type": "text", + "content": "- Strong performance on knowledge-heavy benchmarks does not transfer well. Models that rank highly on MathVista and MMMU often experience substantial performance drops on VISUALPUZZLES, highlighting a disconnect between knowledge-rich and knowledge-light multimodal reasoning tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 174, + 506, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 174, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 506, + 219 + ], + "type": "text", + "content": "- Humans outperform models on easy and medium tasks, while both degrade on harder ones. Human participants show strong and consistent performance on easy and medium-level questions across reasoning categories. In contrast, models struggle even on simpler tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 221, + 505, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 221, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 221, + 505, + 267 + ], + "type": "text", + "content": "- Reasoning enhancements (e.g., long CoT and \"thinking\" mode) yield inconsistent gains. While explicit reasoning strategies help certain models tackle complex reasoning tasks, these techniques do not consistently improve performance across all model families and task types." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 270, + 505, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 270, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 270, + 505, + 315 + ], + "type": "text", + "content": "- Scaling model size does not ensure stronger reasoning. 
We observe no clear trend indicating that larger models outperform smaller ones on VISUALPUZZLES, suggesting that scaling up parameters alone is insufficient to improve domain-agnostic multimodal reasoning." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 331, + 218, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 218, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 218, + 342 + ], + "type": "text", + "content": "2 VISUALPUZZLES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 356, + 376, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 356, + 376, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 376, + 369 + ], + "type": "text", + "content": "2.1 Motivation and Design Principles of VISUALPUZZLES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 376, + 506, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 400 + ], + "type": "text", + "content": "Existing benchmarks often conflate multimodal reasoning with domain-specific knowledge, making it difficult to isolate and measure the pure reasoning capabilities of these models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 404, + 506, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 506, + 472 + ], + "type": "text", + "content": "VISUALPUZZLES is designed to explicitly address this issue by providing a testbed focused on evaluating multimodal reasoning in isolation from specialized knowledge. Specifically, VISUALPUZZLES centers on puzzle-like questions that rely solely on the provided image, question text, and basic common-sense reasoning. The core design principle behind VISUALPUZZLES is to limit the need for external or pretrained domain knowledge. Figure 2 shows examples of VISUALPUZZLES within each reasoning category." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 484, + 264, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 264, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 264, + 496 + ], + "type": "text", + "content": "2.2 Data Collection and Curation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 505, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 506, + 540 + ], + "type": "text", + "content": "We curated VISUALPUZZLES using a multi-stage pipeline. The process involved sourcing, adapting, and validating questions with an emphasis on reasoning quality and minimal reliance on specialized knowledge." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 544, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 624 + ], + "type": "text", + "content": "Question Sourcing. We collected questions from three primary sources: (1) online resources and textbooks focused on logical, visual, and spatial puzzles, (2) synthesized items using images from large-scale vision datasets paired with text prompts, and (3) carefully repurposed items from existing multimodal reasoning benchmarks. 
Each source was selected to ensure a wide variety of reasoning challenges while avoiding trivial or fact-heavy questions. One major source of our questions is manually translated logical reasoning questions from the Chinese Civil Service Examination1. Other sources are listed in Appendix A." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 628, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 506, + 673 + ], + "type": "text", + "content": "Format Adaptation. All collected items were adapted into a consistent multiple-choice format with four options, balancing between text-based and image-based answer choices. This modality balance allows us to better test models' abilities to perform reasoning across diverse formats." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 677, + 507, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 507, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 507, + 714 + ], + "type": "text", + "content": "Data Validation. During curation, we applied strict filtering criteria to eliminate questions requiring advanced mathematical knowledge, specialized domain knowledge and facts. Questions were retained only if they could be solved using information present in the image," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 719, + 463, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 719, + 463, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 719, + 463, + 733 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 116, + 719, + 463, + 733 + ], + "type": "text", + "content": " Chinese Civil Service Examination (Logic Test), 中国国家公务员考试行测(逻辑推理)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 117 + ], + "type": "text", + "content": "the question prompt, and basic common sense. A multi-round validation process was conducted by human annotators, focusing on question clarity, solvability, and reasoning type classification." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 480, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 480, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 480, + 134 + ], + "type": "text", + "content": "Attribute Annotation. 
Finally, each question was annotated with two key attributes:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 506, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 506, + 218 + ], + "type": "text", + "content": "- Reasoning Category: Each item was categorized as algorithmic, analogical, deductive, inductive, or spatial reasoning. These five categories were selected as they represent fundamental forms of reasoning widely discussed in literature (Liu et al., 2020; Lu et al., 2023; Yue et al., 2024a; Gao et al., 2023). At the same time, we aimed to balance comprehensiveness with conciseness, avoiding an overly fine-grained taxonomy that could dilute the benchmark's clarity and usability. This categorization ensures that VISUALPUZZLES covers a broad yet manageable set of reasoning skills relevant to multimodal LLM evaluation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 125, + 220, + 505, + 281 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 125, + 220, + 428, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 220, + 428, + 232 + ], + "spans": [ + { + "bbox": [ + 125, + 220, + 428, + 232 + ], + "type": "text", + "content": "- Algorithmic Reasoning involves reasoning over algorithmic rules." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 125, + 232, + 505, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 232, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 125, + 232, + 505, + 244 + ], + "type": "text", + "content": "- Analogical Reasoning requires analyzing the relationships between a pair of entities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 125, + 244, + 505, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 244, + 505, + 256 + ], + "spans": [ + { + "bbox": [ + 125, + 244, + 505, + 256 + ], + "type": "text", + "content": "- Deductive Reasoning involves logically drawing conclusions from known premises." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 256, + 468, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 256, + 468, + 268 + ], + "spans": [ + { + "bbox": [ + 125, + 256, + 468, + 268 + ], + "type": "text", + "content": "- Inductive Reasoning focuses on generalizing rules from observed patterns." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 125, + 269, + 485, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 269, + 485, + 281 + ], + "spans": [ + { + "bbox": [ + 125, + 269, + 485, + 281 + ], + "type": "text", + "content": "- Spatial Reasoning requires interpreting and manipulating spatial relationships." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 282, + 504, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 282, + 504, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 504, + 306 + ], + "type": "text", + "content": "- Difficulty Level: Labeled as easy, medium, or hard, based on annotators' estimated cognitive load and time-to-solve metrics." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 312, + 504, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 336 + ], + "type": "text", + "content": "This pipeline ensures that VISUALPUZZLES presents a diverse set of high-quality questions designed to challenge multimodal LLMs on their reasoning abilities without involving" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 336, + 244, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 336, + 244, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 244, + 347 + ], + "type": "text", + "content": "pretrained domain knowledge." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 359, + 208, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 359, + 208, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 359, + 208, + 370 + ], + "type": "text", + "content": "2.3 Dataset Statistics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 380, + 312, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 312, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 312, + 447 + ], + "type": "text", + "content": "VISUALPUZZLES comprises 1,168 multimodal reasoning puzzles. It is designed to provide a balanced distribution across different reasoning categories, difficulty levels, and option formats for comprehensive evaluation. The statistics of VISUALPUZZLES are shown in Table 1." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 452, + 312, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 452, + 312, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 312, + 485 + ], + "type": "text", + "content": "Across the five reasoning types, we maintain a roughly even distribution, ensuring that no single reasoning style dominates the benchmark." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 320, + 342, + 503, + 462 + ], + "blocks": [ + { + "bbox": [ + 320, + 342, + 503, + 462 + ], + "lines": [ + { + "bbox": [ + 320, + 342, + 503, + 462 + ], + "spans": [ + { + "bbox": [ + 320, + 342, + 503, + 462 + ], + "type": "table", + "html": "
CategoryStatistics
Total Questions1168
- Algorithmic Reasoning262
- Analogical Reasoning211
- Deductive Reasoning200
- Inductive Reasoning209
- Spatial Reasoning286
Easy/Medium/Hard46%/39%/15%
Option Type (Image/Text)57%/43%
AVG. Question Length154.9
% Easy Words54%
", + "image_path": "363631807b6384d0673ac736965367e311b38d5638c70e070bdfb37313c8d221.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 328, + 464, + 493, + 475 + ], + "lines": [ + { + "bbox": [ + 328, + 464, + 493, + 475 + ], + "spans": [ + { + "bbox": [ + 328, + 464, + 493, + 475 + ], + "type": "text", + "content": "Table 1: Statistics of VISUALPUZZLES" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 485, + 504, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 504, + 530 + ], + "type": "text", + "content": "Similarly, we balanced the dataset across the three difficulty levels (easy, medium, hard) to capture a wide spectrum of cognitive demands. Approximately half of the answer choices in the dataset are image-based and the other half are text-based, enabling evaluation of models' abilities to reason across diverse query formats." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 534, + 504, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 504, + 582 + ], + "type": "text", + "content": "In terms of language complexity, VISUALPUZZLES was constructed with an emphasis on accessibility. Most of the question text uses Basic English vocabulary2 to minimize the impact of linguistic complexity on reasoning performance, focusing the evaluation strictly on multimodal reasoning." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 586, + 504, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 504, + 620 + ], + "type": "text", + "content": "Compared to prior benchmarks, VISUALPUZZLES is unique in that it explicitly minimizes domain-specific knowledge requirements while maintaining high reasoning complexity. We demonstrate these traits of VISUALPUZZLES in Section 5." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 635, + 261, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 261, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 261, + 649 + ], + "type": "text", + "content": "3 Experiments and Results" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 659, + 222, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 659, + 222, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 222, + 673 + ], + "type": "text", + "content": "3.1 Experimental Setup" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 680, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 506, + 715 + ], + "type": "text", + "content": "We comprehensively evaluated the reasoning abilities of a variety of MLLMs on VISUALPUZZLES. Additionally, we performed human evaluations to better understand the gap between human and models' reasoning capabilities." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 403, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 403, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 403, + 731 + ], + "type": "text", + "content": "2https://en.wiktionary.org/wiki/Appendix:Basic_English_word_list" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "We selected a diverse set of proprietary and open MLLMs to ensure broad coverage in terms of model architecture, training scale, and intended application domains. This diversity allows us to capture a wide spectrum of current approaches and capabilities in the field. We integrated VISUALPUZZLES into Lmms-eval (Li* et al., 2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "type": "text", + "content": "Proprietary Models. We evaluate several leading proprietary models that represent the current state of the art: (1) GPT-4o, o1, o3, and o4-mini (OpenAI, 2024; Jaech et al., 2024); (2) Gemini-1.5-Pro, Gemini-2.0-Flash, Gemini-2.0-Flash-Thinking, and Gemini-2.5-Pro (Gemini et al., 2023); (3) Claude-3.5-Sonnet and Claude-3.7-Sonnet (Anthropic, 2022). Among these, o1, o3, o4-mini are explicitly optimized for reasoning, while Gemini-2.0-Flash-Thinking and Claude-3.7-Sonnet incorporate dedicated modules for extensive step-by-step problem-solving." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 214, + 506, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 214, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 506, + 293 + ], + "type": "text", + "content": "Open Models. We further evaluate widely used open MLLMs to gauge how open models compare against proprietary models: (1) LLaVA Series (Liu et al., 2023a; 2024a; Li et al., 2024): LLaVA-1.5 (7B/13B), LLaVA-1.6 (7B/13B/34B), and LLaVA-OV (0.5B/7B/72B); (2) Llama-3.2-Vision-Instruct (11B/90B) (Dubey et al., 2024); (3) Qwen-VL Series (Bai et al., 2024; Yang et al., 2024; Qwen Team, 2025a; 2024): including Qwen-VL, Qwen2-VL (2B/7B/72B-Instruct), Qwen2.5-VL (3B/7B/72B-Instruct), and QvQ-72B-Preview; (4) Cambrian (8B/13B) (Tong et al., 2024); (5) Pangea-7B (Yue et al., 2025)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 297, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 504, + 344 + ], + "type": "text", + "content": "We apply both direct multiple-choice prompting and Chain-of-Thought (CoT) prompting to each model, following recent findings that CoT can significantly enhance model reasoning on complex multimodal tasks. For each model we report the best performance, whether achieved by direct multiple-choice prompting or CoT prompting." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 347, + 506, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 506, + 447 + ], + "type": "text", + "content": "Human Performance. To establish a strong baseline for comparison, we conducted human evaluations with 70 college-level volunteers. Human performance provides a valuable upper-bound reference for assessing the current capabilities and limitations of multimodal reasoning models. While this serves as a benchmark for present-day systems, it is possible that future models could surpass this level of performance. Each participant was randomly assigned a subset of the puzzles and completed them under the same resource-constrained conditions as the models (i.e., without access to external tools or the internet). On average, participants completed each puzzle in 78 seconds, reflecting the typical cognitive load and time demands imposed by VISUALPUZZLES." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 460, + 201, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 201, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 201, + 471 + ], + "type": "text", + "content": "3.2 Overall Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 480, + 504, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 504, + 548 + ], + "type": "text", + "content": "Table 2 and Figure 1 compare the performance of humans and a selected set of models.3 All evaluated models, even the proprietary ones, perform below the 4th percentile of human accuracy, underscoring the significant gap in multimodal reasoning abilities. These results reinforce our finding that, although models have made progress in multimodal understanding, there remains a substantial margin for improvement before they can match or surpass human performance on multimodal reasoning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 552, + 504, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 620 + ], + "type": "text", + "content": "This pattern holds across categories as well. In Table 2, top human participants (95th percentile) exhibit near-perfect accuracy on multiple reasoning categories, while model performance remains substantially lower, even lower than the worst human performance (5th percentile). These results emphasize the need for continued innovation in model architectures and training paradigms if we aim to close the gap between model and human intelligence on complex multimodal reasoning." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 409, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 409, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 409, + 649 + ], + "type": "text", + "content": "4 Disentangling Reasoning from Domain Knowledge" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 659, + 318, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 318, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 318, + 672 + ], + "type": "text", + "content": "4.1 Knowledge Intensity of VISUALPUZZLES" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 680, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 504, + 704 + ], + "type": "text", + "content": "Is VISUALPUZZLES less knowledge-intensive than existing reasoning benchmarks? This question is central to our goal of disentangling reasoning ability from domain-specific" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "3Full results for every model discussed in Section 3 are provided in Appendix D, including separate performance outcomes for both direct multiple-choice and CoT prompting." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 79, + 507, + 366 + ], + "blocks": [ + { + "bbox": [ + 105, + 79, + 507, + 366 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 366 + ], + "type": "table", + "html": "
ModelAlgorithmsAnalogicalDeductiveInductiveSpatialOverall
Random Choice25.025.025.025.025.025.0
Human (95th Percentile)100.0100.0100.081.6100.089.3
Human (50th Percentile)88.066.080.050.090.075.0
Human (5th Percentile)68.125.037.00.059.157.5
Proprietary Models
GPT-4o49.258.349.027.326.241.3
o163.768.367.529.234.351.8
o364.568.369.527.342.754.0
o4-mini65.368.775.533.045.557.0
Gemini-2.0-flash55.358.857.024.431.845.0
Gemini-2.0-flash-thinking46.670.149.024.925.542.2
Gemini-2.5-pro60.064.060.029.736.449.5
Claude-3.7-Sonnet64.548.365.026.837.448.3
Claude-3.7-Sonnet-Thinking67.244.161.531.137.148.2
Open Models (Qwen-Based)
LLaVA-OV-7B27.528.040.524.428.029.4
Pangea-7B32.423.738.528.732.531.3
Qwen2.5-VL-7B-Instruct38.223.751.524.931.133.7
LLaVA-OV-72B34.726.537.027.328.730.8
QvQ-72B-Preview44.843.644.026.830.837.8
Qwen2.5-VL-72B-Instruct53.446.958.025.829.542.3
Open Models (Llama-Based)
Cambrian-8B31.324.236.024.029.028.9
Llama-3.2-11B-Vision-Instruct31.030.839.021.126.229.4
Llama-3.2-90B-Vision-Instruct45.023.243.026.331.534.1
", + "image_path": "fbc5dc8093e16ec24a3470e9d0985c3a334bafdc964c8c86c331e8622178f9d0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 428, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 428, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 504, + 462 + ], + "type": "text", + "content": "knowledge. Many current benchmarks blur this line, making it difficult to assess general reasoning in non-expert settings. VISUALPUZZLES was designed to target visual reasoning skills while deliberately minimizing reliance on specialized knowledge." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 467, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 504, + 556 + ], + "type": "text", + "content": "To test whether VISUALPUZZLES achieves this goal, we prompted GPT-4o to generate \"knowledge concept checklists\" for 50 randomly selected questions from a widely-used knowledge-intensive reasoning dataset MMMU and 50 from VISUALPUZZLES. We manually verified each question as discussed in subsection E.3. Each checklist comprises knowledge-specific questions intended to assess whether a model possesses the background information required to solve the original problem. For example, if a question depends on understanding two distinct physics laws, its checklist would include a question to explain each. The number of checklist items per instance serves as a proxy for knowledge intensity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 560, + 325, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 325, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 325, + 649 + ], + "type": "text", + "content": "We found that MMMU problems resulted in significantly more checklist items on average (3.9) compared to VISUALPUZZLES (1.1), as shown in Table 3. This supports the hypothesis that VISUALPUZZLES is substantially less reliant on domain knowledge. As a result, performance on VISUALPUZZLES more directly reflects a model's ability to reason over visual and textual content, offering" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 333, + 559, + 503, + 604 + ], + "blocks": [ + { + "bbox": [ + 104, + 374, + 504, + 413 + ], + "lines": [ + { + "bbox": [ + 104, + 374, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 504, + 413 + ], + "type": "text", + "content": "Table 2: Performance (%) comparison of humans and selected models on VISUALPUZZLES. We report the best performance resulting from direct multiple-choice prompting and CoT prompting for each method. We highlighted all the reasoning models." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 333, + 559, + 503, + 604 + ], + "lines": [ + { + "bbox": [ + 333, + 559, + 503, + 604 + ], + "spans": [ + { + "bbox": [ + 333, + 559, + 503, + 604 + ], + "type": "table", + "html": "
Benchmark# Knowledge Qs.
MMMU3.9
VISUALPUZZLES1.1
", + "image_path": "5abe78030b7037208b41af28065a5b555d55d83ad1baf370ba0a12ff565461b1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 330, + 607, + 504, + 640 + ], + "lines": [ + { + "bbox": [ + 330, + 607, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 330, + 607, + 504, + 640 + ], + "type": "text", + "content": "Table 3: AVG. number of knowledge concept questions generated per instance on MMMU vs. VISUALPUZZLES." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 649, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 504, + 672 + ], + "type": "text", + "content": "a clearer signal of progress in multimodal reasoning. Full prompt examples and further discussion are provided in Appendix E." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": "Do models already possess the knowledge required to solve VISUALPUZZLES? To explore this, we measured models' knowledge accuracy—their ability to answer the knowledge checklist questions correctly—on both benchmarks. This metric reflects how much of the required knowledge is already known by the model, independent of reasoning. We found a stark contrast: while many models exceed " + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 677, + 505, + 733 + ], + "type": "text", + "content": " knowledge accuracy on VISUALPUZZLES," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 83, + 317, + 209 + ], + "blocks": [ + { + "bbox": [ + 109, + 83, + 317, + 209 + ], + "lines": [ + { + "bbox": [ + 109, + 83, + 317, + 209 + ], + "spans": [ + { + "bbox": [ + 109, + 83, + 317, + 209 + ], + "type": "image", + "image_path": "422bc896f6eb461b560d28a6bcdb7b46675ce08dc157a69ee3ea72239fe20f5f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 318, + 82, + 503, + 209 + ], + "blocks": [ + { + "bbox": [ + 318, + 82, + 503, + 209 + ], + "lines": [ + { + "bbox": [ + 318, + 82, + 503, + 209 + ], + "spans": [ + { + "bbox": [ + 318, + 82, + 503, + 209 + ], + "type": "image", + "image_path": "dd2c3eda1142f57bd67e35d01e2305f06caa0abe215815db4166a95aaa2a731c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 217, + 315, + 342 + ], + "blocks": [ + { + "bbox": [ + 109, + 217, + 315, + 342 + ], + "lines": [ + { 
+ "bbox": [ + 109, + 217, + 315, + 342 + ], + "spans": [ + { + "bbox": [ + 109, + 217, + 315, + 342 + ], + "type": "image", + "image_path": "8ac1191f2d067a68d549fd32aad8c30f764923b7f3097b435600a39acd0bbc64.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 348, + 504, + 411 + ], + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 411 + ], + "type": "text", + "content": "Figure 3: Scatter plots with trend lines of the relationship between accuracy and model size (top) and the relationship between reasoning and knowledge accuracy (bottom) on MMMU and VISUALPUZZLES. The dots' sizes represent relative model sizes. The correlation between reasoning accuracy and knowledge accuracy is higher on MMMU (0.8) than on VISUALPUZZLES (0.4)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 316, + 217, + 500, + 342 + ], + "blocks": [ + { + "bbox": [ + 316, + 217, + 500, + 342 + ], + "lines": [ + { + "bbox": [ + 316, + 217, + 500, + 342 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 500, + 342 + ], + "type": "image", + "image_path": "085189f854c325765c80bfc7f114017ecfa432448aa0ef26f6e006d72565f3fc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "text", + "content": "most score below " + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "text", + "content": " on MMMU, with smaller models frequently dropping under " + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "text", + "content": ". Only the largest models approach " + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 430, + 506, + 464 + ], + "type": "text", + "content": " accuracy on MMMU, underscoring its heavier reliance on domain-specific knowledge." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 468, + 505, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 505, + 492 + ], + "type": "text", + "content": "Does scaling up model size improve performance? We also plot reasoning accuracy (i.e., overall performance on the benchmark) in Figure 3, revealing some interesting trends:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 499, + 504, + 613 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 105, + 499, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 499, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 504, + 544 + ], + "type": "text", + "content": "- MMMU. Larger models tend to have higher knowledge accuracy, and this often translates into higher overall benchmark performance. 
This aligns with MMMU's reliance on domain-specific understanding; models with more parameters and training data are better at recalling relevant factual knowledge, thus improving their overall performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 547, + 504, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 504, + 613 + ], + "type": "text", + "content": "- VISUALPUZZLES. Although many models achieve near-" + }, + { + "bbox": [ + 105, + 547, + 504, + 613 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 105, + 547, + 504, + 613 + ], + "type": "text", + "content": " knowledge accuracy on VISUALPUZZLES, we observe no clear increase in either knowledge or reasoning accuracy as model size grows. In contrast to MMMU, simply scaling the number of parameters does not guarantee better performance on VISUALPUZZLES, implying that further gains on VISUALPUZZLES must stem from improvements in models' reasoning abilities rather than reliance on extensive knowledge." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "What is the relationship between knowledge and reasoning? Figure 3 shows two scatter plots with trend lines that measure how knowledge accuracy correlates with reasoning accuracy across different open models, where the relative sizes of the dots represent the sizes of the models. On MMMU (left), there is a strong positive correlation (0.8), suggesting that a model possessing more knowledge correlates strongly with better reasoning performance. In contrast, VISUALPUZZLES (right) exhibits a more modest correlation (0.4). Although there is still an upward trend, gains in knowledge accuracy lead to smaller improvements in reasoning accuracy. This discrepancy implies that while overcoming knowledge gaps is central to reasoning success on MMMU, VISUALPUZZLES tasks demand more nuanced inference steps that depend less on domain knowledge." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 150 + ], + "type": "text", + "content": "Overall, these findings reinforce that VISUALPUZZLES's comparatively lower knowledge requirements are readily met by both proprietary and open models. By contrast, MMMU poses a greater knowledge challenge to smaller models, and scaling up model size clearly benefits such knowledge-intensive tasks. 
However, on VISUALPUZZLES, larger model size alone is not a decisive factor, which might imply that genuine multimodal reasoning depends on more than just the number of parameters or pre-trained knowledge." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 163, + 326, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 163, + 326, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 326, + 175 + ], + "type": "text", + "content": "4.2 Reasoning Complexity of VISUALPUZZLES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 504, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 205 + ], + "type": "text", + "content": "Do questions in VISUALPUZZLES require more complex reasoning than those in existing benchmarks like MMMU?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 211, + 324, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 211, + 324, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 211, + 324, + 277 + ], + "type": "text", + "content": "Besides observing that models generally achieve lower accuracy on VISUALPUZZLES compared to MMMU, we further investigated whether this gap stems from increased reasoning complexity. To do so, we measured the proportion of reasoning steps required to solve each question. We began" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 277, + 506, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 366 + ], + "type": "text", + "content": "by gathering detailed, step-by-step solutions from the models for each question, which were manually verified for completeness. Then we classified whether each step is a logical reasoning step with the help of an LLM. We show the result in Table 4. On average, logical reasoning steps take up " + }, + { + "bbox": [ + 104, + 277, + 506, + 366 + ], + "type": "inline_equation", + "content": "14.8\\%" + }, + { + "bbox": [ + 104, + 277, + 506, + 366 + ], + "type": "text", + "content": " more of the total steps in solving VISUALPUZZLES questions than in MMMU (82.1% vs. 71.5%). This analysis is based on GPT-4o and Gemini-2.0-Flash across 200 randomly sampled questions per benchmark. These results suggest that VISUALPUZZLES demands more extensive reasoning, aligning with its goal of evaluating deeper multimodal reasoning beyond factual recall. A prompt example is shown in Appendix F." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 333, + 209, + 503, + 244 + ], + "blocks": [ + { + "bbox": [ + 333, + 209, + 503, + 244 + ], + "lines": [ + { + "bbox": [ + 333, + 209, + 503, + 244 + ], + "spans": [ + { + "bbox": [ + 333, + 209, + 503, + 244 + ], + "type": "table", + "html": "
ModelMMMUVISUALPUZZLES
GPT-4o75.1%87.0%
Gemini-2.0-Flash67.9%77.3%
", + "image_path": "574525c730940cd3774fe74971f3aadf225b9b9840f7ef41a6416ea224eee334.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 330, + 247, + 504, + 271 + ], + "lines": [ + { + "bbox": [ + 330, + 247, + 504, + 271 + ], + "spans": [ + { + "bbox": [ + 330, + 247, + 504, + 271 + ], + "type": "text", + "content": "Table 4: Percentage of logical reasoning steps in solving benchmark questions." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 378, + 402, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 402, + 391 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 402, + 391 + ], + "type": "text", + "content": "4.3 Do Reasoning Models Perform Better than Their Baselines?" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 108, + 406, + 239, + 492 + ], + "blocks": [ + { + "bbox": [ + 108, + 406, + 239, + 492 + ], + "lines": [ + { + "bbox": [ + 108, + 406, + 239, + 492 + ], + "spans": [ + { + "bbox": [ + 108, + 406, + 239, + 492 + ], + "type": "image", + "image_path": "097011d809433a79022988b41408051bc1ba5980c55cdc75b274a14638a0f3d4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "lines": [ + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": "Figure 4: Comparison of accuracy and average number of total completion tokens of reasoning models and their general counterparts on VISUALPUZZLES. We didn't include Gemini-2.0-Flash models here because Gemini-2.0-Flash-Thinking does not reveal the number of reasoning tokens of responses. The accuracies of Gemini-2.0-Flash and Gemini-2.0-Flash-Thinking is " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "45.0\\%" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "inline_equation", + "content": "42.2\\%" + }, + { + "bbox": [ + 104, + 496, + 506, + 574 + ], + "type": "text", + "content": " respectively. Despite much higher number of completion tokens, reasoning models do not often achieve better performance on VISUALPUZZLES." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 242, + 406, + 371, + 492 + ], + "blocks": [ + { + "bbox": [ + 242, + 406, + 371, + 492 + ], + "lines": [ + { + "bbox": [ + 242, + 406, + 371, + 492 + ], + "spans": [ + { + "bbox": [ + 242, + 406, + 371, + 492 + ], + "type": "image", + "image_path": "9d2390fe3c1b50f4c9e0c3fc610d172eb6b346f542c089a5ccbaa752243e3aff.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 373, + 406, + 503, + 493 + ], + "blocks": [ + { + "bbox": [ + 373, + 406, + 503, + 493 + ], + "lines": [ + { + "bbox": [ + 373, + 406, + 503, + 493 + ], + "spans": [ + { + "bbox": [ + 373, + 406, + 503, + 493 + ], + "type": "image", + "image_path": "b3f117d6f51d23f162333ef36f7641cdcf69a8d721c3509ea76381b786e355f5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 586, + 506, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 676 + ], + "type": "text", + "content": "Recent reasoning models often scale up inference compute by generating longer chains of thought (CoTs) to enhance reasoning ability. To assess the effectiveness of this strategy on VISUALPUZZLES, we compare several reasoning models with their non-reasoning counterparts in Figure 4. The reasoning model o1 outperforms GPT-4o overall. However, structured \"thinking\" modes, despite much higher number of completion tokens, show no consistent benefit. Similarity of output further reveals that the thinking mode primarily increases vocabulary without meaningfully altering the underlying reasoning process, as illustrated in Figure 13." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 689, + 505, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 505, + 700 + ], + "type": "text", + "content": "4.4 Are Branching and Revalidation Reasoning Patterns Effective on VISUALPUZZLES?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "As discussed in Section 4.3, reasoning-enabled models do not consistently outperform their non-reasoning counterparts on VISUALPUZZLES. 
To better understand this discrepancy, we" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 80, + 257, + 158 + ], + "blocks": [ + { + "bbox": [ + 107, + 80, + 257, + 158 + ], + "lines": [ + { + "bbox": [ + 107, + 80, + 257, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 80, + 257, + 158 + ], + "type": "image", + "image_path": "b536792942d0b01abaf60cfa156d36a936bb060f4df1b04a5055134b1d998792.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 161, + 506, + 207 + ], + "lines": [ + { + "bbox": [ + 104, + 161, + 506, + 207 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 506, + 207 + ], + "type": "text", + "content": "Figure 5: Comparison of Reasoning Pattern of Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Left figure compares the accuracy of Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking on MMMU and VISUALPUZZLES. Middle figure shows frequency of each pattern. Right figure shows correlation of the patterns with accuracy on the benchmarks." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 257, + 80, + 408, + 159 + ], + "blocks": [ + { + "bbox": [ + 257, + 80, + 408, + 159 + ], + "lines": [ + { + "bbox": [ + 257, + 80, + 408, + 159 + ], + "spans": [ + { + "bbox": [ + 257, + 80, + 408, + 159 + ], + "type": "image", + "image_path": "523a7fd5906283d19e4b6b98b68b5d07b68a0003d35bf321828d973d89158391.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 415, + 82, + 504, + 159 + ], + "blocks": [ + { + "bbox": [ + 415, + 82, + 504, + 159 + ], + "lines": [ + { + "bbox": [ + 415, + 82, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 415, + 82, + 504, + 159 + ], + "type": "image", + "image_path": "1662aa990cd75de3f0e4e15080707ff2d3e5d543c8a719718bfdc6202b92da36.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 221, + 506, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 506, + 258 + ], + "type": "text", + "content": "examine Claude-3.7-Sonnet-Thinking's reasoning behaviors present in long CoTs, specifically, branching and re-validation, which are known to play important roles in enhancing reasoning performance4." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 262, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 319 + ], + "type": "text", + "content": "As shown in Figure 5, our analysis reveals a striking contrast between benchmarks. 
On MMMU, both branching and re-validation correlate positively with model accuracy. These strategies help models explore alternative reasoning paths and revisit earlier steps, aiding in the retrieval of relevant factual knowledge, an essential component for solving MMMU's knowledge-intensive questions. An illustrative example is provided in Appendix E." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 323, + 298, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 323, + 298, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 298, + 434 + ], + "type": "text", + "content": "Surprisingly, on VISUALPUZZLES, these reasoning behaviors are more frequent, yet less predictive of success. Despite their increased presence in long-form responses, we observe no significant correlation between these strategies and task accuracy. This suggests that models may be using branching and re-validation in ways that do not meaningfully contribute to solving the problem." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 439, + 298, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 298, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 298, + 540 + ], + "type": "text", + "content": "Figure 6 highlights this with an example from Claude-3.7-Sonnet-Thinking, where the model applies branching on a VISUALPUZZLES puzzle. However, the additional reasoning paths remain shallow and fail to engage with the core challenge—understanding the spatial arrangement of chairs in the image. The full response is included in Appendix E." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 555, + 175, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 175, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 175, + 569 + ], + "type": "text", + "content": "5 Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 323, + 499, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 323, + 499, + 354 + ], + "spans": [ + { + "bbox": [ + 306, + 323, + 499, + 354 + ], + "type": "text", + "content": "Question: Given a pair of chairs arranged at a specific angle, select the option that matches the angle of the chairs in the question. (Answer: C)" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 355, + 501, + 409 + ], + "blocks": [ + { + "bbox": [ + 306, + 355, + 501, + 409 + ], + "lines": [ + { + "bbox": [ + 306, + 355, + 501, + 409 + ], + "spans": [ + { + "bbox": [ + 306, + 355, + 501, + 409 + ], + "type": "image", + "image_path": "d90d53e47cf6e5230fb7578325d85a11aeb477745ec81d8999ae9db553e82ac7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 533, + 506, + 574 + ], + "lines": [ + { + "bbox": [ + 302, + 533, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 302, + 533, + 506, + 574 + ], + "type": "text", + "content": "Figure 6: An example of Claude-3.7-Sonnet-Thinking utilizing branching to solve a VISUALPUZZLES puzzle." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 411, + 501, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 501, + 529 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 501, + 529 + ], + "type": "text", + "content": "Claude-3.7-Sonnet (With Thinking) \nLet me examine the chairs in the question: \n- The chairs are positioned 180 degrees apart. 
\n- They are shown in profile view. \nOption A: ...; Option B: ...; Option C: ...; Option D: ... Any of them is correct from an angular perspective.. \nHowever, looking more carefully at the exact orientation: \n- The original chairs are shown in perfect profile view \n- Option A shows chairs in perfect profile view, .... .. \nI'll go with Option A as it presents the cleanest profile view that matches the arrangement in the question." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 580, + 416, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 416, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 416, + 594 + ], + "type": "text", + "content": "5.1 Do Models Approach VISUALPUZZLES Questions Differently?" + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 277, + 599, + 503, + 647 + ], + "blocks": [ + { + "bbox": [ + 104, + 601, + 268, + 667 + ], + "lines": [ + { + "bbox": [ + 104, + 601, + 268, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 268, + 667 + ], + "type": "text", + "content": "Table 5 shows the statistics of Claude-3.7-Sonnet-Thinking's answering strategy. We observe a clear divergence in answering strategies between MMMU and VISUALPUZZLES. On MMMU, the" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 277, + 599, + 503, + 647 + ], + "lines": [ + { + "bbox": [ + 277, + 599, + 503, + 647 + ], + "spans": [ + { + "bbox": [ + 277, + 599, + 503, + 647 + ], + "type": "table", + "html": "
BenchmarkAnswer-FirstOption-First
MMMU29.3%70.7%
VISUALPUZZLES (Image Options)72.5%27.5%
VISUALPUZZLES (Text Options)98.3%1.7%
", + "image_path": "e39d29d3f3dbe47128d3e18cab78b68ccdeb0175590f89a101b716c36c4cfca3.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 325, + 648, + 453, + 661 + ], + "lines": [ + { + "bbox": [ + 325, + 648, + 453, + 661 + ], + "spans": [ + { + "bbox": [ + 325, + 648, + 453, + 661 + ], + "type": "text", + "content": "Table 5: Answering Strategy" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 667, + 504, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 713 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 713 + ], + "type": "text", + "content": "model tend to follow an option-driven approach—using the provided choices early to eliminate unlikely answers and select the most relevant one, often without explicitly solving the problem. In contrast, models more frequently adopt an answer-first strategy on VISUALPUZZLES, attempting to solve the question independently before comparing" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 453, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 453, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 453, + 733 + ], + "type": "text", + "content": "4We examined Claude-3.7-Sonnet-Thinking as it explicitly provides thinking output." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 504, + 118 + ], + "type": "text", + "content": "the result to the answer choices. This pattern holds across both textual and image-based options, though the option-first approach appears slightly more often (around " + }, + { + "bbox": [ + 104, + 81, + 504, + 118 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 81, + 504, + 118 + ], + "type": "text", + "content": ") for image-based tasks—likely due to the added complexity of visual comparison." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 134, + 424, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 134, + 424, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 134, + 424, + 148 + ], + "type": "text", + "content": "5.2 Does model performance transfer between reasoning categories?" 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 156, + 276, + 308 + ], + "blocks": [ + { + "bbox": [ + 109, + 156, + 276, + 308 + ], + "lines": [ + { + "bbox": [ + 109, + 156, + 276, + 308 + ], + "spans": [ + { + "bbox": [ + 109, + 156, + 276, + 308 + ], + "type": "image", + "image_path": "97e95fdf454706c7da694622cce81f46164cd08513842a89a130211e7210be6f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 315, + 288, + 349 + ], + "lines": [ + { + "bbox": [ + 104, + 315, + 288, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 288, + 349 + ], + "type": "text", + "content": "Figure 7: Correlation Heatmap among reasoning categories for models (averaged across all models we evaluated)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 292, + 156, + 506, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 156, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 292, + 156, + 506, + 355 + ], + "type": "text", + "content": "Figure 7 presents a correlation heatmap illustrating the relationships among the five reasoning categories in VISUALPUZZLES. We report model correlations averaged across all models in Table 2. For humans, each reasoning category likely engages different cognitive or mental processes (Goel & Dolan, 2004; Green et al., 2010; Bright & Feeney, 2014; Babcock & Vallesi, 2015), so performance in one category might not transfer to performance in another. However, the correlation heatmap of the models tells a different story. We observe notably strong correlations across reasoning categories, with values ranging from 0.11 to as high as 0.94. In particular, algorithmic and deductive reasoning show high correlation (0.94), and other pairs such as algorithmic-analogical and deductive-analogical also exhibit strong associations. This suggests" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 355, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 355, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 504, + 411 + ], + "type": "text", + "content": "that model performance tends to generalize across categories. However, this generalization may not reflect true reasoning abilities. Instead, the high correlations could indicate that models are leveraging shared surface-level patterns or shortcut strategies that happen to work across multiple structurally different categories, unlike humans, who may rely on distinct cognitive processes." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 428, + 196, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 428, + 196, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 196, + 441 + ], + "type": "text", + "content": "5.3 Error Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": "Figure 8 shows a pie chart illustrating the distribution of error categories of 100 instances generated by Claude-3.7-Sonnet-Thinking on VISUALPUZZLES, revealing that reasoning errors dominate at " + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "inline_equation", + "content": "56\\%" + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": ", reinforcing the fact that reasoning is greatest challenge to models in VISUALPUZZLES. Perceptual errors " + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "inline_equation", + "content": "(21\\%)" + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": " and spatial / orientation errors " + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "inline_equation", + "content": "(17\\%)" + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": " also constitute substantial portions of failures, reflecting difficulties in interpreting visual elements and understanding spatial relationships. These three categories together account for " + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "inline_equation", + "content": "94\\%" + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": " of mistakes, emphasizing a need for multimodal models with stronger reasoning capabilities with more robust perception and spatial understanding. Textual and visual understanding errors " + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "inline_equation", + "content": "(4\\%)" + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": " and reject-to-answer cases " + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "inline_equation", + "content": "(2\\%)" + }, + { + "bbox": [ + 104, + 450, + 357, + 628 + ], + "type": "text", + "content": " are relatively rare. Appendix I shows samples of error and correct cases of each reasoning and difficulty category." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 364, + 426, + 504, + 596 + ], + "blocks": [ + { + "bbox": [ + 364, + 426, + 504, + 596 + ], + "lines": [ + { + "bbox": [ + 364, + 426, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 364, + 426, + 504, + 596 + ], + "type": "image", + "image_path": "9be1b71d51b868e947ea438ebbee99b5295830e1be6a90035757235c7e3f403e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 362, + 597, + 504, + 622 + ], + "lines": [ + { + "bbox": [ + 362, + 597, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 362, + 597, + 504, + 622 + ], + "type": "text", + "content": "Figure 8: Error Distribution of Claude-3.7-Sonnet-Thinking" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 648, + 201, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 648, + 201, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 201, + 661 + ], + "type": "text", + "content": "6 Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "type": "text", + "content": "Multimodal Language Models (MLLMs), particularly vision language models have experienced significant improvements recently. Large scale vision language models (Gemini et al., 2023); (OpenAI, 2024); (Anthropic, 2022); including open weight ones (Li et al., 2024); (Yue et al., 2025); (Liu et al., 2024b); (Tong et al., 2024); (Dubey et al., 2024) are capable of utilizing both image and text inputs to solve challenging questions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 139 + ], + "type": "text", + "content": "Multimodal reasoning models, models that specialize in complex reasoning, further push the boundary of MLLMs' capabilities. Large scale multimodal reasoning models such as QVQ (Qwen Team, 2024), Claude-3.7-Sonnet-thinking (Anthropic, 2022), o1 (Jaech et al., 2024), Gemini-2.0-flash-thinking (Gemini et al., 2023) excel in reasoning heavy tasks such as coding and solving math problems." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 201 + ], + "type": "text", + "content": "Multimodal Reasoning Benchmarks. There exists a number of multimodal benchmarks that test on both the models' world knowledge and reasoning abilities. 
These benchmarks (Yue et al., 2024a); (Marino et al., 2019); (Liu et al., 2023b); (Yue et al., 2024b); (Authors, 2025) emphasize on the multimodal ability of models as a whole, without further separation of knowledge and reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 507, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 507, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 507, + 261 + ], + "type": "text", + "content": "Recently, more multimodal benchmarks have placed emphasis on multimodal logical reasoning abilities. Many of them (Lu et al., 2023); (Wang et al., 2024b) focus primarily on mathematic problems, testing on both mathematical knowledge and reasoning. Some others cover on more general logical reasoning problems (Cherian et al., 2022b); (Gao et al., 2023), testing on both models' knowledge and reasoning in different domains." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 281, + 285, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 285, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 285, + 294 + ], + "type": "text", + "content": "7 Conclusion and Future Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 308, + 507, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 507, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 507, + 399 + ], + "type": "text", + "content": "We presented VISUALPUZZLES, a novel multimodal benchmark carefully designed to minimize the impact of domain-specific knowledge and isolate models' core reasoning capabilities. Our results show that while proprietary and large-scale open models achieve relatively higher performance, they still fall short of human-level reasoning—especially on more complex tasks such as analogical and inductive reasoning. Moreover, we observe that strong performance on knowledge-intensive benchmarks like MathVista and MMMU does not necessarily translate into high accuracy on VISUALPUZZLES, underscoring the distinct challenge of knowledge-light reasoning tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 403, + 507, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 507, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 507, + 525 + ], + "type": "text", + "content": "These findings suggest that purely scaling model size and knowledge resources may not suffice for robust multimodal reasoning skills; rather, methods that promote structured reasoning, such as explicit thinking modes or recursive reasoning steps, can offer substantial improvements, particularly for hard questions. Future research can explore new training strategies, specialized architectures, or model interpretations tailored to reduce reliance on memorized facts and enhance logical inference. Extending VISUALPUZZLES to include additional types of multi-image reasoning or temporally dynamic visual information may further stress-test models' core inference abilities. By disentangling domain knowledge from multimodal reasoning, we hope VISUALPUZZLES will serve as a valuable tool for developing and evaluating next-generation MLLMs that excel at genuinely understanding and reasoning about the world without depending heavily on specialized factual knowledge." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 545, + 190, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 545, + 190, + 558 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 190, + 558 + ], + "type": "text", + "content": "8 Limitations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 574, + 505, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 505, + 632 + ], + "type": "text", + "content": "Disentangling Knowledge Despite our best efforts to isolate domain-specific knowledge from the evaluation of multimodal reasoning, VISUALPUZZLES is still not entirely free of knowledge dependencies. Basic familiarity with everyday objects or common scenarios is still required; complete knowledge free evaluation remains an ideal rather than a practical reality." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 647, + 505, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 505, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 505, + 681 + ], + "type": "text", + "content": "Real World Application VISUALPUZZLES emphasizes puzzle-like questions that may not reflect the full diversity of real-world scenarios, limiting generalizability to more specialized domains." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Question Format VISUALPUZZLES focuses on multiple-choice questions, which may not capture the breadth of open-ended reasoning tasks where models must generate complex textual or visual outputs." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": "Future work can address these limitations by including more varied question formats, broader domains, and more granular analyses of a model's knowledge versus its multimodal reasoning abilities." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 222, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 222, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 222, + 146 + ], + "type": "text", + "content": "9 Ethical Statement" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 159, + 504, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 216 + ], + "type": "text", + "content": "This paper uses samples extracted from existing quiz sources for scholarly analysis and testing purposes, in accordance to US fair use law and standard practice. These data are neither intended for, nor capable of, substituting for the original works; thus, we believe their inclusion does not diminish the market value or utility of the source materials. A complete list of references for the data sources is attached in Appendix A." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 231, + 217, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 217, + 245 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 217, + 245 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 257, + 504, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 257, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 504, + 301 + ], + "type": "text", + "content": "This project was supported in part by a grant from DSTA Singapore and the Carnegie Bosch Institute. The authors would like to thank CMU NeuLab colleagues for their constructive comments. The authors would also like to thank all volunteers who participated in the human evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 319, + 168, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 168, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 168, + 331 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 338, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 338, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 338, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 505, + 361 + ], + "type": "text", + "content": "https://www.anthropic.com/index/introducing-claudeAnthropic. Claude, 2022. URL https://www.anthropic.com/index/introducing-claude." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 370, + 505, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 370, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 107, + 370, + 505, + 393 + ], + "type": "text", + "content": "Humanity's Last Exam's Authors. Humanity's last exam. ArXiv, abs/2501.14249, 2025. URL https://api-semanticscholar.org/CorpusID:275906652." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 401, + 504, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 401, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 107, + 401, + 504, + 425 + ], + "type": "text", + "content": "Laura Babcock and Antonino Vallesi. The interaction of process and domain in prefrontal cortex during inductive reasoning. Neuropsychologia, 67:91-99, 2015." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 433, + 506, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 433, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 106, + 433, + 506, + 478 + ], + "type": "text", + "content": "Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond, 2024. URL https://openreview.net/forum?id=qrGjFJV13m." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 487, + 504, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 487, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 107, + 487, + 504, + 521 + ], + "type": "text", + "content": "Yonatan Bitton, Ron Yosef, Eliyahu Strugo, Dafna Shahaf, Roy Schwartz, and Gabriel Stanovsky. Vasr: Visual analogies of situation recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 241-249, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 529, + 504, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 529, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 107, + 529, + 504, + 552 + ], + "type": "text", + "content": "Aimée K Bright and Aidan Feeney. Causal knowledge and the development of inductive reasoning. Journal of Experimental Child Psychology, 122:48-61, 2014." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 560, + 506, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 560, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 107, + 560, + 506, + 593 + ], + "type": "text", + "content": "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin Smith, and Joshua B Tenenbaum. Are deep neural networks smarter than second graders? arXiv preprint arXiv:2212.09993, 2022a." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 602, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 602, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 107, + 602, + 506, + 647 + ], + "type": "text", + "content": "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Kevin A. Smith, and Joshua B. Tenenbaum. Are deep neural networks smarter than second graders? 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10834-10844, 2022b. URL https://api-semanticscholar.org/CorpusID:254877678." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 656, + 504, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 656, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 107, + 656, + 504, + 679 + ], + "type": "text", + "content": "DeepSeek-AI. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 687, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 687, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 506, + 731 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. ArXiv preprint, abs/2407.21783, 2024. URL https://arxiv.org/abs/2407.21783." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "text", + "content": "Jingying Gao, Qi Wu, Alan Blair, and Maurice Pagnucco. Lora: A logical reasoning augmented dataset for visual question answering. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 507, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 507, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 507, + 167 + ], + "type": "text", + "content": "Gemini, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. Gemini: a family of highly capable multimodal models. ArXiv preprint, abs/2312.11805, 2023. URL https://arxiv.org/abs/2312.11805." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 173, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 173, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 505, + 198 + ], + "type": "text", + "content": "Vinod Goel and Raymond J Dolan. Differential involvement of left prefrontal cortex in inductive and deductive reasoning. Cognition, 93(3):B109-B121, 2004." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 202, + 505, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 202, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 505, + 238 + ], + "type": "text", + "content": "Adam E Green, David JM Kraemer, Jonathan A Fugelsang, Jeremy R Gray, and Kevin N Dunbar. Connecting long distance: semantic distance in analogical reasoning modulates frontopolar cortex activity. Cerebral cortex, 20(1):70-76, 2010." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 243, + 507, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 507, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 507, + 277 + ], + "type": "text", + "content": "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 283, + 505, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 505, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 505, + 328 + ], + "type": "text", + "content": "Bo Li*, Peiyuan Zhang*, Kaicheng Zhang*, Fanyi Pu*, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimoal models, March 2024. URL https://github.com/EvolvingLMMs-Lab/lmms-eval." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 335, + 507, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 507, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 507, + 370 + ], + "type": "text", + "content": "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 375, + 505, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 375, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 375, + 505, + 399 + ], + "type": "text", + "content": "Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning, 2023a. URL https://arxiv.org/abs/2310.03744." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 404, + 507, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 404, + 507, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 507, + 438 + ], + "type": "text", + "content": "Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, 2024a. URL https://arxiv.org/pdf/2401.13601." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 445, + 505, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 505, + 469 + ], + "type": "text", + "content": "Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 474, + 507, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 507, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 507, + 519 + ], + "type": "text", + "content": "Junpeng Liu, Tianyue Ou, Yifan Song, Yuxiao Qu, Wai Lam, Chenyan Xiong, Wenhu Chen, Graham Neubig, and Xiang Yue. Harnessing webpage uis for text-rich visual understanding. ArXiv, abs/2410.13824, 2024b. URL https://api(semanticscholar.org/ CorpusID:273403951." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 525, + 507, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 507, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 507, + 571 + ], + "type": "text", + "content": "Yuanzhan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, Kai Chen, and Dahua Lin. Mmbench: Is your multi-modal model an all-around player? In European Conference on Computer Vision, 2023b. URL https://api_semanticscholar.org/CorpusID:259837088." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 576, + 507, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 507, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 507, + 621 + ], + "type": "text", + "content": "Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 628, + 507, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 507, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 507, + 673 + ], + "type": "text", + "content": "Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3190-3199, 2019. URL https://api_semanticscholar.org/CorpusID:173991173." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 679, + 507, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 507, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 507, + 703 + ], + "type": "text", + "content": "OpenAI. Hello gpt4-o. https://openai.com/index/hello-gpt-4o/, 2024. URL https://openai.com/index/hello-gpt-4o/." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 708, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 507, + 732 + ], + "type": "text", + "content": "Qwen Team. Qvq: To see the world with wisdom, December 2024. URL https://qwenlm.github.io/blog/qvq-72b-preview/." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 528 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 105 + ], + "type": "text", + "content": "Qwen Team. Qwen2.5-vl, January 2025a. URL https://qwenlm.github.io/blog/qwen2.5-v1/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "text", + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025b. URL https://qwenlm.github.io/blog/qwq-32b/." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 141, + 507, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 141, + 507, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 507, + 186 + ], + "type": "text", + "content": "Jonathan Roberts, Kai Han, Neil Houlsby, and Samuel Albanie. SciFIBench: Benchmarking large multimodal models for scientific figure interpretation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=HcLFNuQwy5." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 507, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 507, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 507, + 239 + ], + "type": "text", + "content": "Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. ArXiv preprint, abs/2406.16860, 2024. URL https://arxiv.org/abs/2406.16860." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 244, + 507, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 244, + 507, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 507, + 280 + ], + "type": "text", + "content": "Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 285, + 507, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 507, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 507, + 320 + ], + "type": "text", + "content": "Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset, 2024b. URL https:// arxiv.org/abs/2402.14804." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 327, + 507, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 507, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 507, + 361 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. ArXiv preprint, abs/2407.10671, 2024. URL https://arxiv.org/abs/2407.10671." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 367, + 507, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 507, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 507, + 425 + ], + "type": "text", + "content": "Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Renliang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024a." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 430, + 507, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 507, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 507, + 465 + ], + "type": "text", + "content": "Xiang Yue, Tianyu Zheng, Yuansheng Ni, Yubo Wang, Kai Zhang, Shengbang Tong, Yuxuan Sun, Botao Yu, Ge Zhang, Huan Sun, et al. Mmmu-pro: A more robust multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2409.02813, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 471, + 507, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 507, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 507, + 528 + ], + "type": "text", + "content": "Xiang Yue, Yueqi Song, Akari Asai, Simran Khanuja, Anjali Kantharuban, Seungone Kim, Jean de Dieu Nyandwi, Lintang Sutawika, Sathyanarayanan Ramamoorthy, and Graham Neubig. Pangea: A fully open multilingual multimodal LLM for 39 languages. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=a3g214yEys." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 310, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 310, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 310, + 97 + ], + "type": "text", + "content": "Table of Contents in Appendix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 112, + 505, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 112, + 505, + 124 + ], + "spans": [ + { + "bbox": [ + 107, + 112, + 505, + 124 + ], + "type": "text", + "content": "A VISUALPUZZLES Statistics 16" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 129, + 505, + 158 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 120, + 129, + 505, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 129, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 120, + 129, + 505, + 140 + ], + "type": "text", + "content": "A.1 Breakdown of Statistics of VISUALPUZZLES 16" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 146, + 505, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 146, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 121, + 146, + 505, + 158 + ], + "type": "text", + "content": "A.2 Data Sources 16" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 173, + 505, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 173, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 173, + 505, + 186 + ], + "type": "text", + "content": "B Model Evaluation Setup 16" + } + ] + } + ], + "index": 6 + }, + { 
+ "bbox": [ + 106, + 200, + 505, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 200, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 200, + 505, + 213 + ], + "type": "text", + "content": "C Human Annotation Setup 16" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 217, + 505, + 247 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 120, + 217, + 505, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 217, + 505, + 229 + ], + "spans": [ + { + "bbox": [ + 120, + 217, + 505, + 229 + ], + "type": "text", + "content": "C.1 Difficulty Labeling 16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 234, + 505, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 234, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 121, + 234, + 505, + 247 + ], + "type": "text", + "content": "C.2 Reasoning Category Labeling 17" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 260, + 505, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 260, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 106, + 260, + 505, + 272 + ], + "type": "text", + "content": "D Full Results 17" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 277, + 505, + 306 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 121, + 277, + 505, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 277, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 121, + 277, + 505, + 289 + ], + "type": "text", + "content": "D.1 Full Results w/ CoT 17" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 294, + 505, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 294, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 121, + 294, + 505, + 306 + ], + "type": "text", + "content": "D.2 Full Results w/n CoT 17" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 321, + 505, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 321, + 505, + 334 + ], + "spans": [ + { + "bbox": [ + 106, + 321, + 505, + 334 + ], + "type": "text", + "content": "E Knowledge Checklist 17" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 338, + 505, + 384 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 121, + 338, + 505, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 338, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 121, + 338, + 505, + 350 + ], + "type": "text", + "content": "E.1 Knowledge Checklist Generation 17" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 121, + 355, + 505, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 355, + 505, + 367 + ], + "spans": [ + { + "bbox": [ + 121, + 355, + 505, + 367 + ], + "type": "text", + "content": "E.2 Example Knowledge Checklist Question 20" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 121, + 372, + 505, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 372, + 505, + 384 + ], + "spans": [ + { + "bbox": [ + 121, + 372, + 505, + 384 + ], + "type": "text", + "content": "E.3 Knowledge Checklist Human Annotation 20" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 399, + 505, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, 
+ 399, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 399, + 505, + 411 + ], + "type": "text", + "content": "F Reasoning Complexity 20" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "type": "text", + "content": "G Comparison with Other Benchmarks 20" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 453, + 505, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 453, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 453, + 505, + 465 + ], + "type": "text", + "content": "H Additional Analysis 21" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 121, + 470, + 505, + 550 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 121, + 470, + 505, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 470, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 121, + 470, + 505, + 483 + ], + "type": "text", + "content": "H.1 Proprietary V.S. Open Models 21" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 121, + 487, + 505, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 487, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 121, + 487, + 505, + 499 + ], + "type": "text", + "content": "H.2 Reasoning Category and Difficulty Levels 21" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 504, + 505, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 504, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 121, + 504, + 505, + 517 + ], + "type": "text", + "content": "H.3 Option Types and Difficulty Levels 24" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 521, + 505, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 521, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 121, + 521, + 505, + 533 + ], + "type": "text", + "content": "H.4 Case Study of Reasoning 25" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 537, + 505, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 537, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 121, + 537, + 505, + 550 + ], + "type": "text", + "content": "H.5 Impact of CoT 25" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 564, + 505, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 564, + 505, + 577 + ], + "spans": [ + { + "bbox": [ + 106, + 564, + 505, + 577 + ], + "type": "text", + "content": "I Case Study 27" + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 275, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 
275, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 275, + 94 + ], + "type": "text", + "content": "A VISUALPUZZLES Statistics" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 334, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 334, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 334, + 118 + ], + "type": "text", + "content": "A.1 Breakdown of Statistics of VISUALPUZZLES" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 118, + 148, + 492, + 259 + ], + "blocks": [ + { + "bbox": [ + 105, + 126, + 416, + 140 + ], + "lines": [ + { + "bbox": [ + 105, + 126, + 416, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 416, + 140 + ], + "type": "text", + "content": "Table 6 shows a breakdown of statistics of VISUALPUZZLES questions." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 118, + 148, + 492, + 259 + ], + "lines": [ + { + "bbox": [ + 118, + 148, + 492, + 259 + ], + "spans": [ + { + "bbox": [ + 118, + 148, + 492, + 259 + ], + "type": "table", + "html": "
 Reasoning CategoryImage OptionsText OptionsTotal 
EasyMediumHardEasyMediumHard
 Algorithmic21801241009262 
Analogical1208110000211
 Deductive29242457921200 
 Inductive770127320209 
 Spatial12341661523286 
Total300224145233233331168
", + "image_path": "97f4b7976903cd9ad74954a9240e4fb1d04f429d28e0ec5595ed2893a243218d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 304, + 194, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 194, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 194, + 316 + ], + "type": "text", + "content": "A.2 Data Sources" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 320, + 505, + 445 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 130, + 320, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 320, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 320, + 504, + 344 + ], + "type": "text", + "content": "- Chinese Civil Service Examination (中国国家公务员考试) 5 (224 puzzles): we manually translated questions from this exam to English from Chinese." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 346, + 504, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 346, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 130, + 346, + 504, + 369 + ], + "type": "text", + "content": "Textbooks (210 puzzles): we carefully collected and re-purposed questions from online resources and textbooks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 372, + 504, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 372, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 130, + 372, + 504, + 396 + ], + "type": "text", + "content": "- Smart-101 (Cherian et al., 2022a) (247 puzzles): we carefully selected images from this benchmark and synthesized new questions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 397, + 505, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 397, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 130, + 397, + 505, + 422 + ], + "type": "text", + "content": "- MATH-Vision (Wang et al., 2024a) (293 puzzles): we carefully selected and repurposed questions from this benchmark." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 423, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 423, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 130, + 423, + 504, + 445 + ], + "type": "text", + "content": "VASR (Bitton et al., 2023) (194 puzzles): we carefully selected questions from this benchmark." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 462, + 261, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 462, + 261, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 261, + 477 + ], + "type": "text", + "content": "B Model Evaluation Setup" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 106, + 490, + 502, + 581 + ], + "blocks": [ + { + "bbox": [ + 105, + 266, + 504, + 281 + ], + "lines": [ + { + "bbox": [ + 105, + 266, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 504, + 281 + ], + "type": "text", + "content": "Table 6: Number of questions in each reasoning category, option types, and difficulty levels." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 490, + 502, + 581 + ], + "lines": [ + { + "bbox": [ + 106, + 490, + 502, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 490, + 502, + 581 + ], + "type": "table", + "html": "
Model Evaluation Prompt with Chain-of-Thought
Solve the multiple-choice question and then answer with the option letter from the given choices. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of options. Think step by step before answering.
Model Evaluation Prompt w/n Chain-of-Thought
Answer the question with the option's letter from the given choices directly.
", + "image_path": "4cfbe405921837246d7d60cac33ea7bec0b336bb12e4e84a9fe4589d15475253.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 597, + 271, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 271, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 271, + 613 + ], + "type": "text", + "content": "C Human Annotation Setup" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 623, + 220, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 623, + 220, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 220, + 636 + ], + "type": "text", + "content": "C.1 Difficulty Labeling" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 643, + 506, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 668 + ], + "type": "text", + "content": "Each question was also carefully assigned a difficulty label from easy, medium, or hard, based on the cognitive load required for reasoning." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 676, + 505, + 704 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 130, + 676, + 491, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 676, + 491, + 689 + ], + "spans": [ + { + "bbox": [ + 130, + 676, + 491, + 689 + ], + "type": "text", + "content": "- Easy Level questions could be solved by the annotator in less than one minute." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 130, + 690, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 690, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 130, + 690, + 505, + 704 + ], + "type": "text", + "content": "- Medium Level questions could be solved by the annotator in one to three minutes." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 710, + 476, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 476, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 476, + 731 + ], + "type": "text", + "content": "5https://en.wikipedia.org/wiki/Civil服务体系_of_the_People%27s_Republic_of_China#Examinations." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 130, + 82, + 504, + 106 + ], + "type": "text", + "content": "- Hard Level questions require the annotator more than five minutes to solve or quit solving." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 109, + 137, + 300, + 149 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 137, + 300, + 149 + ], + "spans": [ + { + "bbox": [ + 109, + 137, + 300, + 149 + ], + "type": "text", + "content": "Annotation Guideline for Puzzle Difficulty" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 154, + 501, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 154, + 501, + 175 + ], + "spans": [ + { + "bbox": [ + 108, + 154, + 501, + 175 + ], + "type": "text", + "content": "Try to solve the puzzle first. You need to measure the time you attempted to solve each puzzle. Then, select from Easy, Medium, or Hard based on the time required." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 175, + 500, + 235 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 108, + 175, + 500, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 175, + 500, + 194 + ], + "spans": [ + { + "bbox": [ + 108, + 175, + 500, + 194 + ], + "type": "text", + "content": "- Easy Level: You can solve or answer the question within 1 minute. This level of puzzles should require minimal reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 194, + 500, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 194, + 500, + 214 + ], + "spans": [ + { + "bbox": [ + 108, + 194, + 500, + 214 + ], + "type": "text", + "content": "- Medium Level: You can solve or answer the question within 1-3 minutes. This level of puzzles should demand moderate reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 214, + 500, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 214, + 500, + 235 + ], + "spans": [ + { + "bbox": [ + 108, + 214, + 500, + 235 + ], + "type": "text", + "content": "- Hard Level: You can / cannot solve this question with more than 5 minutes. This level of puzzles should involve significant / multi-step reasoning." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 263, + 268, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 268, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 268, + 277 + ], + "type": "text", + "content": "C.2 Reasoning Category Labeling" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 109, + 294, + 348, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 294, + 348, + 307 + ], + "spans": [ + { + "bbox": [ + 109, + 294, + 348, + 307 + ], + "type": "text", + "content": "Annotation Guideline for Puzzle Reasoning Category" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 312, + 500, + 392 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 108, + 312, + 500, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 312, + 500, + 331 + ], + "spans": [ + { + "bbox": [ + 108, + 312, + 500, + 331 + ], + "type": "text", + "content": "Assign the category that best describes the primary type of reasoning or logic required for each puzzle:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 332, + 500, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 332, + 500, + 351 + ], + "spans": [ + { + "bbox": [ + 108, + 332, + 500, + 351 + ], + "type": "text", + "content": "- Algorithmic Reasoning: Involves following or devising a step-by-step procedure or rule-based process." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 108, + 352, + 500, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 352, + 500, + 362 + ], + "spans": [ + { + "bbox": [ + 108, + 352, + 500, + 362 + ], + "type": "text", + "content": "- Analogical Reasoning: Requires identifying relationships by comparison between pairs of entities." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 362, + 484, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 362, + 484, + 372 + ], + "spans": [ + { + "bbox": [ + 108, + 362, + 484, + 372 + ], + "type": "text", + "content": "- Deductive Reasoning: Involves deriving specific conclusions from general or given premises." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 372, + 458, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 372, + 458, + 382 + ], + "spans": [ + { + "bbox": [ + 108, + 372, + 458, + 382 + ], + "type": "text", + "content": "- Inductive Reasoning: Focuses on generalizing a rule or pattern from specific instances." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 382, + 481, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 382, + 481, + 392 + ], + "spans": [ + { + "bbox": [ + 108, + 382, + 481, + 392 + ], + "type": "text", + "content": "- Spatial Reasoning: Involves visualizing and manipulating shapes, distances, or orientations." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 424, + 195, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 424, + 195, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 195, + 436 + ], + "type": "text", + "content": "D Full Results" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 456, + 224, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 224, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 224, + 468 + ], + "type": "text", + "content": "D.1 Full Results w/ CoT" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 483, + 230, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 230, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 230, + 494 + ], + "type": "text", + "content": "D.2 Full Results w/n CoT" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 510, + 246, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 246, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 246, + 525 + ], + "type": "text", + "content": "E Knowledge Checklist" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 543, + 284, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 543, + 284, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 284, + 555 + ], + "type": "text", + "content": "E.1 Knowledge Checklist Generation" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 109, + 574, + 342, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 574, + 342, + 586 + ], + "spans": [ + { + "bbox": [ + 109, + 574, + 342, + 586 + ], + "type": "text", + "content": "Prompt to Generate Knowledge Checklist Questions" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 108, + 590, + 500, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 590, + 500, + 623 + ], + "spans": [ + { + "bbox": [ + 108, + 590, + 500, + 623 + ], + "type": "text", + "content": "You are an exam writer. You are now writing a knowledge test. You are given a question (Question) regarding an image and its standard solution (Solution), your task is to write free response questions that test on individual knowledge required in answering the question correctly." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 109, + 630, + 313, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 630, + 313, + 641 + ], + "spans": [ + { + "bbox": [ + 109, + 630, + 313, + 641 + ], + "type": "text", + "content": "You should follow these steps to complete the task:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 110, + 642, + 500, + 701 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 110, + 642, + 351, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 642, + 351, + 651 + ], + "spans": [ + { + "bbox": [ + 110, + 642, + 351, + 651 + ], + "type": "text", + "content": "1. explicitly analyze the given image, Question, and Solution" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 110, + 651, + 436, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 651, + 436, + 661 + ], + "spans": [ + { + "bbox": [ + 110, + 651, + 436, + 661 + ], + "type": "text", + "content": "2. explicitly list out the individual knowledge concepts required to reach Solution." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 110, + 661, + 500, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 661, + 500, + 691 + ], + "spans": [ + { + "bbox": [ + 110, + 661, + 500, + 691 + ], + "type": "text", + "content": "3. write free response questions to test on the definition of each concept listed. Your generated questions should not include details of the given Question. Note that you need to provide answer keys to these questions too." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 110, + 691, + 317, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 691, + 317, + 701 + ], + "spans": [ + { + "bbox": [ + 110, + 691, + 317, + 701 + ], + "type": "text", + "content": "4. format the free response questions in json format." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 109, + 711, + 187, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 711, + 187, + 721 + ], + "spans": [ + { + "bbox": [ + 109, + 711, + 187, + 721 + ], + "type": "text", + "content": "Question: question" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 110, + 721, + 179, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 721, + 179, + 730 + ], + "spans": [ + { + "bbox": [ + 110, + 721, + 179, + 730 + ], + "type": "text", + "content": "Solution: answer" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 179, + 504, + 597 + ], + "blocks": [ + { + "bbox": [ + 107, + 179, + 504, + 597 + ], + "lines": [ + { + "bbox": [ + 107, + 179, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 107, + 179, + 504, + 597 + ], + "type": "table", + "html": "
ModelAlgorithmicAnalogicalDeductiveInductiveSpatialOverall
Random Choice25.025.025.025.025.025.0
Human (95th Percentile)100.0100.0100.081.6100.089.3
Human (50th Percentile)88.066.080.050.090.075.0
Human (5th Percentile)68.125.037.00.059.157.5
Proprietary Models
o4-mini65.368.775.533.045.557.0
o364.568.369.527.342.754.0
o163.768.367.529.234.351.8
GPT-4o49.258.349.027.326.241.3
Gemini-2.5-pro60.064.060.029.736.449.5
Gemini-2.0-flash55.358.857.024.431.845.0
Gemini-2.0-flash-thinking46.670.149.024.925.542.2
Gemini-1.5-Pro53.457.458.526.332.545.0
Claude-3.7-Sonnet64.548.365.026.837.448.3
Claude-3.7-Sonnet-thinking67.244.161.531.137.148.2
Claude-3.5-Sonnet53.447.951.525.434.342.4
Open Models
LLaVA-1.5-7B23.321.836.020.619.223.7
LLaVA-1.5-13B24.821.823.025.425.524.2
LLaVA-1.6-7B27.523.730.022.521.324.8
LLaVA-1.6-13B25.225.627.027.323.425.5
LLaVA-1.6-34B29.428.043.024.925.529.7
LLaVA-OV-0.5B21.026.130.522.525.224.8
LLaVA-OV-7B27.926.136.523.425.527.7
LLaVA-OV-72B34.726.537.027.328.730.8
Llama-3.2-11B-Vision-Instruct31.030.839.021.126.229.4
Llama-3.2-90B-Vision-Instruct45.023.243.026.331.534.1
Qwen-VL21.431.325.026.324.125.3
Qwen2-VL-72B41.628.439.522.529.032.4
QvQ-72B-Preview43.145.548.027.327.637.8
Qwen2-VL-2B-Instruct26.026.124.527.825.526.0
Qwen2-VL-7B-Instruct36.321.838.520.622.727.9
Qwen2-VL-72B-Instruct39.933.545.223.532.434.9
Qwen2.5-VL-3B-Instruct35.127.544.525.824.831.2
Qwen2.5-VL-7B-Instruct40.526.639.024.029.732.1
Qwen2.5-VL-72B-Instruct53.446.958.025.829.542.3
Cambrian-8B31.324.236.024.029.028.9
Cambrian-13B24.825.639.524.421.026.5
Pangea-7B30.528.935.024.425.228.6
", + "image_path": "ed5578a37fe0c76ed27e20a91c7422833b5c24ad0a429cef3aaf0b4bb114d8e2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 605, + 506, + 628 + ], + "lines": [ + { + "bbox": [ + 105, + 605, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 506, + 628 + ], + "type": "text", + "content": "Table 7: Performance (%) of various models with Chain of Thoughts (CoT) on VISUALPUZZLES." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 219, + 504, + 559 + ], + "blocks": [ + { + "bbox": [ + 107, + 219, + 504, + 559 + ], + "lines": [ + { + "bbox": [ + 107, + 219, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 107, + 219, + 504, + 559 + ], + "type": "table", + "html": "
ModelAlgorithmicAnalogicalDeductiveInductiveSpatialOverall
Random Choice25.025.025.025.025.025.0
Human (95th Percentile)100.0100.0100.081.6100.089.3
Human (50th Percentile)88.066.080.050.090.075.0
Human (5th Percentile)68.125.037.00.059.157.5
Proprietary Models
GPT-4o40.834.140.524.929.734.0
Gemini-2.0-flash57.641.758.023.035.743.2
Gemini-1.5-Pro51.246.554.024.929.440.8
Open Models
LLaVA-1.5-7B24.424.734.526.825.526.9
LLaVA-1.5-13B24.426.133.526.328.327.6
LLaVA-1.6-7B27.525.132.524.927.327.4
LLaVA-1.6-13B21.424.729.528.223.125.0
LLaVA-1.6-34B31.327.343.024.427.629.8
LLaVA-OV-0.5B24.425.637.524.925.527.2
LLaVA-OV-7B27.528.040.524.428.029.4
LLaVA-OV-72B31.723.645.021.324.628.8
Llama-3.2-11B-Vision-Instruct27.524.231.026.327.627.3
Llama-3.2-90B-Vision-Instruct38.222.344.525.833.633.1
Qwen-VL23.726.529.527.826.626.6
Qwen2-VL-72B38.928.443.020.629.032.0
QvQ-72B-Preview44.843.644.026.830.837.8
Qwen2-VL-2B-Instruct31.729.440.523.931.531.3
Qwen2-VL-7B-Instruct33.624.246.022.526.230.2
Qwen2-VL-72B-Instruct40.530.346.025.429.434.2
Qwen2.5-VL-3B-Instruct36.326.147.025.822.431.0
Qwen2.5-VL-7B-Instruct38.223.751.524.931.133.7
Qwen2.5-VL-72B-Instruct43.140.351.525.433.738.6
Cambrian-8B25.220.435.023.020.624.5
Cambrian-13B23.328.036.524.926.227.4
Pangea-7B32.423.738.528.732.531.3
", + "image_path": "64cd8ec62f766c056e33cbfafa74cd1d10500a34f2b993be19215152727c7859.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 567, + 504, + 589 + ], + "lines": [ + { + "bbox": [ + 105, + 567, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 504, + 589 + ], + "type": "text", + "content": "Table 8: Performance (%) of various models with Multiple Choice Direct prompting on VISUALPUZZLES." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 316, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 316, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 316, + 95 + ], + "type": "text", + "content": "E.2 Example Knowledge Checklist Question" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 108, + 335, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 108, + 335, + 121 + ], + "spans": [ + { + "bbox": [ + 110, + 108, + 335, + 121 + ], + "type": "text", + "content": "Example Knowledge Checklist Question (MMMU)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 125, + 500, + 177 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 107, + 125, + 465, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 125, + 465, + 137 + ], + "spans": [ + { + "bbox": [ + 107, + 125, + 465, + 137 + ], + "type": "text", + "content": "- Question: Explain the Arbitrage Pricing Theory (APT) model and its purpose in finance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 137, + 500, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 137, + 500, + 177 + ], + "spans": [ + { + "bbox": [ + 107, + 137, + 500, + 177 + ], + "type": "text", + "content": "- Answer: The Arbitrage Pricing Theory (APT) model is a financial theory that estimates the expected return on an asset based on the asset's sensitivity to various macroeconomic factors. It is used to determine the fair price of an asset by considering multiple factors that could affect its return, as opposed to relying on a single market index as in the Capital Asset Pricing Model (CAPM)." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 110, + 189, + 373, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 189, + 373, + 201 + ], + "spans": [ + { + "bbox": [ + 110, + 189, + 373, + 201 + ], + "type": "text", + "content": "Example Knowledge Checklist Question (VISUALPUZZLES)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 206, + 498, + 228 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 107, + 206, + 381, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 206, + 381, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 206, + 381, + 217 + ], + "type": "text", + "content": "- Question: What is the definition of distance in a geometric context?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 217, + 498, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 217, + 498, + 228 + ], + "spans": [ + { + "bbox": [ + 107, + 217, + 498, + 228 + ], + "type": "text", + "content": "- Answer: Distance in a geometric context refers to the measurement of space between two points." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 247, + 321, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 321, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 321, + 258 + ], + "type": "text", + "content": "E.3 Knowledge Checklist Human Annotation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "type": "text", + "content": "We asked two human annotators to manually verify and correct the knowledge checklist questions and gave them the following instructions. The inter-annotator agreement rate is " + }, + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "type": "inline_equation", + "content": "87.8\\%" + }, + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 312, + 253, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 312, + 253, + 323 + ], + "spans": [ + { + "bbox": [ + 110, + 312, + 253, + 323 + ], + "type": "text", + "content": "Human Annotation Instructions" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 326, + 405, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 326, + 405, + 337 + ], + "spans": [ + { + "bbox": [ + 108, + 326, + 405, + 337 + ], + "type": "text", + "content": "You are given a json file, where each item contains the following elements:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 337, + 500, + 376 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 108, + 337, + 264, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 337, + 264, + 346 + ], + "spans": [ + { + "bbox": [ + 108, + 337, + 264, + 346 + ], + "type": "text", + "content": "- Question: a multiple-choice question." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 346, + 375, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 346, + 375, + 357 + ], + "spans": [ + { + "bbox": [ + 108, + 346, + 375, + 357 + ], + "type": "text", + "content": "- Answer: the answer to the question with an optional explanation." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 108, + 357, + 500, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 357, + 500, + 376 + ], + "spans": [ + { + "bbox": [ + 108, + 357, + 500, + 376 + ], + "type": "text", + "content": "- Knowledge Concept Checklist: a list of question-answer pairs, where each question in the list is intended to represent a distinct knowledge concept necessary for solving the Question." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 108, + 385, + 500, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 385, + 500, + 407 + ], + "spans": [ + { + "bbox": [ + 108, + 385, + 500, + 407 + ], + "type": "text", + "content": "You task is to annotate the knowledge concept checklists generated by a model. You should carefully evaluate each question-answer pair based on the following criteria:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 108, + 407, + 501, + 516 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 108, + 407, + 500, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 407, + 500, + 426 + ], + "spans": [ + { + "bbox": [ + 108, + 407, + 500, + 426 + ], + "type": "text", + "content": "1. Necessity: Is the question genuinely necessary for solving the problem? If not, then remove the question." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 108, + 426, + 501, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 426, + 501, + 446 + ], + "spans": [ + { + "bbox": [ + 108, + 426, + 501, + 446 + ], + "type": "text", + "content": "2. Repetition: Check if any questions are repetitive or duplicate existing questions within the list. If the question is repetitive or duplicate, then remove the question." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 108, + 446, + 500, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 446, + 500, + 466 + ], + "spans": [ + { + "bbox": [ + 108, + 446, + 500, + 466 + ], + "type": "text", + "content": "3. Completeness: Ensure no critical knowledge concepts required to solve the problem are missing, and identify if any additional important questions should have been included." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 108, + 466, + 500, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 466, + 500, + 486 + ], + "spans": [ + { + "bbox": [ + 108, + 466, + 500, + 486 + ], + "type": "text", + "content": "4. Correctness: Verify whether the provided answers are accurate. Revise the checklist in case of incorrect checklist QA pairs." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 108, + 486, + 500, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 486, + 500, + 516 + ], + "spans": [ + { + "bbox": [ + 108, + 486, + 500, + 516 + ], + "type": "text", + "content": "5. Knowledge v.s. Skills: Ensure each question explicitly evaluates a knowledge concept rather than testing skills or problem-solving techniques. Remove any questions that primarily evaluate skills instead of knowledge." 
+ } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 538, + 251, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 538, + 251, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 251, + 552 + ], + "type": "text", + "content": "F Reasoning Complexity" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 110, + 570, + 358, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 570, + 358, + 582 + ], + "spans": [ + { + "bbox": [ + 110, + 570, + 358, + 582 + ], + "type": "text", + "content": "Instruction Prompt to Solve Questions in Detailed Steps" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 108, + 586, + 211, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 586, + 211, + 598 + ], + "spans": [ + { + "bbox": [ + 108, + 586, + 211, + 598 + ], + "type": "text", + "content": " < \\text{Imoge}>" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 108, + 598, + 500, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 598, + 500, + 617 + ], + "spans": [ + { + "bbox": [ + 108, + 598, + 500, + 617 + ], + "type": "text", + "content": "Solve this question with First Order Logic. Write out each thinking step explicitly, do not skip steps. In your response, begin each step with ____STEP_START__." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 109, + 618, + 186, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 618, + 186, + 628 + ], + "spans": [ + { + "bbox": [ + 109, + 618, + 186, + 628 + ], + "type": "text", + "content": "step " + }, + { + "bbox": [ + 109, + 618, + 186, + 628 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 109, + 618, + 186, + 628 + ], + "type": "text", + "content": " step_num>" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 650, + 331, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 650, + 331, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 331, + 663 + ], + "type": "text", + "content": "G Comparison with Other Benchmarks" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "type": "text", + "content": "Figure 9 provides a comparative analysis between VISUALPUZZLES and several widely-used benchmarks for multimodal reasoning, visualizing the knowledge requirement and reasoning complexity of each benchmark. VISUALPUZZLES has high reasoning complexity and low knowledge requirement, with an aim to disentangle multimodal reasoning from domain-specific knowledge to evaluate general reasoning abilities in non-expert settings." 
+ } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 79, + 504, + 203 + ], + "blocks": [ + { + "bbox": [ + 108, + 79, + 504, + 203 + ], + "lines": [ + { + "bbox": [ + 108, + 79, + 504, + 203 + ], + "spans": [ + { + "bbox": [ + 108, + 79, + 504, + 203 + ], + "type": "table", + "html": "
DatasetSizeReasoning LoadKnowledge Requirement% Easy Words Question TypeAnswer Type
LogiQA0.7KHeavyLight52.0TextText
GSM8K8.5KHeavyHeavy54.0TextText
WikiDiverse0.8KLightHeavy35.8Image+TextText
MathVista6.1KHeavyHeavy51.9Image+TextText
MMMU11.5KHeavyHeavy46.4Image+TextText
MATH-Vision3.0KHeavyHeavy53.8Image+TextImage+Text
MathVerse2.6KHeavyHeavy38.2Image+TextText
LogicBench1.5KHeavyLight53.6TextText
LogicVista0.4KHeavyHeavy41.2Image+TextImage
NaturalBench10KLightLight52.5Image+TextText
VISUALPUZZLES1.2KHeavyLight54.1Image+TextImage+Text
", + "image_path": "3baa80789cf99e4f68fa4353ef1538bb9d8872ab955ae28a990f5460c196ae12.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 142, + 209, + 468, + 222 + ], + "lines": [ + { + "bbox": [ + 142, + 209, + 468, + 222 + ], + "spans": [ + { + "bbox": [ + 142, + 209, + 468, + 222 + ], + "type": "text", + "content": "Table 9: Comparison of other existing benchmarks with VISUALPUZZLES" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 205, + 238, + 405, + 353 + ], + "blocks": [ + { + "bbox": [ + 205, + 238, + 405, + 353 + ], + "lines": [ + { + "bbox": [ + 205, + 238, + 405, + 353 + ], + "spans": [ + { + "bbox": [ + 205, + 238, + 405, + 353 + ], + "type": "image", + "image_path": "5a9945fb2950cbab9e5972ec413e60b62a9b0274be1ab32e1b5d8735f9bb79f7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 360, + 499, + 374 + ], + "lines": [ + { + "bbox": [ + 111, + 360, + 499, + 374 + ], + "spans": [ + { + "bbox": [ + 111, + 360, + 499, + 374 + ], + "type": "text", + "content": "Figure 9: Comparison between VISUALPUZZLES and several widely-used benchmarks." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 397, + 506, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 506, + 465 + ], + "type": "text", + "content": "Table 10 compare the performance of various model families across MathVista, MMMU, and VISUALPUZZLES. Both MathVista and MMMU are benchmarks that have a heavy emphasis on both knowledge and reasoning, whereas VISUALPUZZLES assess models on domain-disentangled multimodal reasoning alone. We found that success on knowledge-intensive multimodal reasoning benchmarks as MathVista and MMMU does not always carry over to VISUALPUZZLES that emphasize reasoning rather than extensive pre-trained knowledge." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 485, + 242, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 242, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 242, + 499 + ], + "type": "text", + "content": "H Additional Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 513, + 271, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 513, + 271, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 271, + 526 + ], + "type": "text", + "content": "H.1 Proprietary V.S. Open Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 625 + ], + "type": "text", + "content": "From Table 2, proprietary models (e.g., o4-mini and Claude-3.7-Sonnet) consistently achieve higher overall accuracy than most open-source models on VISUALPUZZLES. However, some open models also show competitive or even higher performance in both the overall accuracy and specific reasoning categories. For instance, Qwen2.5-VL-72B-Instruct demonstrates higher performance than GPT-4o on algorithmic reasoning, deductive reasoning, spatial reasoning, and overall accuracy. 
This indicates that while proprietary models currently have leading performance, open models are also rapidly improving on multimodal reasoning capabilities." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 643, + 324, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 643, + 324, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 324, + 656 + ], + "type": "text", + "content": "H.2 Reasoning Category and Difficulty Levels" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 734 + ], + "type": "text", + "content": "Figure 11 and Figure 10 present complementary views of human accuracy against three representative models: o1 (one of the best-performing proprietary models), Qwen2.5-VL72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model). Specifically, Figure 10 compares performance across difficulty levels for each reasoning category, while Figure 11 compares performance across categories within each difficulty level." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 80, + 475, + 411 + ], + "blocks": [ + { + "bbox": [ + 134, + 80, + 475, + 411 + ], + "lines": [ + { + "bbox": [ + 134, + 80, + 475, + 411 + ], + "spans": [ + { + "bbox": [ + 134, + 80, + 475, + 411 + ], + "type": "table", + "html": "
ModelMathVistaMMMUVISUALPUZZLES
Human60.388.680.1
o173.978.251.8
GPT-4o63.869.141.1
Gemini-2.0-Flash-71.745.0
Gemini-1.5-Pro63.962.245.4
Claude-3.5-Sonnet67.768.342.4
Claude-3.7-Sonnet-71.848.3
Claude-3.7-Sonnet (Thinking)-75.048.3
LLaVA-1.5-7B-36.226.9
LLaVA-1.5-13B27.636.427.6
LLaVA-NeXT-7B35.834.627.4
LLaVA-NeXT-13B36.235.325.3
LLaVA-NeXT-34B46.551.129.8
LLaVA-OV-0.5B34.831.427.2
LLaVA-OV-7B63.248.829.4
LLaVA-OV-72B67.556.831.8
Llama-3.2-11B-Vision-Instruct51.550.729.4
Llama-3.2-90B-Vision-Instruct57.360.334.3
Qwen2-VL-72B70.564.532.1
QvQ-72B-Preview71.470.337.9
Qwen2-VL-2B-Instruct43.041.131.3
Qwen2-VL-7B-Instruct58.254.130.2
Qwen2-VL-72B-Instruct70.564.534.9
Qwen2.5-VL-3B-Instruct62.353.131.2
Qwen2.5-VL-7B-Instruct68.258.633.7
Qwen2.5-VL-72B-Instruct74.870.242.3
Cambrian-8B49.042.728.5
Cambrian-13B48.040.027.4
", + "image_path": "ce6eca9bc80c76f6a1977c79291f1241734efb0200616f04e4ea89031045eeaf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 418, + 503, + 440 + ], + "lines": [ + { + "bbox": [ + 106, + 418, + 503, + 440 + ], + "spans": [ + { + "bbox": [ + 106, + 418, + 503, + 440 + ], + "type": "text", + "content": "Table 10: Comparison of other MathVista and MMMU with VISUALPUZZLES on human and SOTA models" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 106, + 478, + 504, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 478, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 504, + 534 + ], + "type": "text", + "content": "Humans consistently outperform all models across categories and difficulty levels, often by large margins. Notably, human performance remains high and relatively stable in the algorithmic, deductive, and spatial categories, even on hard questions. While accuracy does decline in analogical and inductive reasoning as difficulty increases, humans still maintain a clear advantage over models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 539, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 539, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 504, + 606 + ], + "type": "text", + "content": "In contrast, model performance declines sharply as difficulty increases, especially for open-source models. Accuracy of Llama-3.2-90B-Vision-Instruct on hard analogical tasks drops to just " + }, + { + "bbox": [ + 106, + 539, + 504, + 606 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 106, + 539, + 504, + 606 + ], + "type": "text", + "content": ". Even one of the strongest proprietary models, o1, while more robust, still lags significantly behind humans, particularly on analogical, inductive, and spatial tasks. On easy tasks, some models perform competitively in certain categories, but this advantage largely disappears on medium and hard questions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 610, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 610, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 504, + 732 + ], + "type": "text", + "content": "Interestingly, these models maintain a generally stable performance on algorithmic and deductive reasoning. For o1 and Qwen2.5-VL-72B-Instruct, their performances on algorithmic reasoning even go up for more difficult tasks, whereas human performance degraded as the difficulty level increases. However, all models, including o1, perform the worse at analogical, inductive and spatial reasoning in general, especially as the difficulty level increases. This suggests that models are relatively better at tasks requiring structured, rule-based algorithmic processing, while their performance degrades more steeply in tasks requiring relational abstraction (analogical), pattern induction (inductive), and visual understanding (spatial), particularly as the difficulty level increases. 
In summary, these results indicate that while some models exhibit promising performance on structured and easier reasoning tasks, multimodal models still struggle with abstract and complex reasoning, particularly" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 504, + 195 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 195 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 195 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 195 + ], + "type": "image", + "image_path": "e07d22a4f5fa74094ba8126af08a759927de508d0089c6af0dc1d22ad43d3d84.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 195, + 504, + 309 + ], + "blocks": [ + { + "bbox": [ + 106, + 195, + 504, + 309 + ], + "lines": [ + { + "bbox": [ + 106, + 195, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 195, + 504, + 309 + ], + "type": "image", + "image_path": "2eb08ceca733cd8c85e3edca9cf46721d82bbaf628b9e06926a75a16c7a708b2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 310, + 504, + 423 + ], + "blocks": [ + { + "bbox": [ + 106, + 310, + 504, + 423 + ], + "lines": [ + { + "bbox": [ + 106, + 310, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 310, + 504, + 423 + ], + "type": "image", + "image_path": "c5e2c4db744c3657fe1cb4531ced0aa2d78982d0bb5fcdc58270986c6c22c141.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 424, + 504, + 538 + ], + "blocks": [ + { + "bbox": [ + 106, + 424, + 504, + 538 + ], + "lines": [ + { + "bbox": [ + 106, + 424, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 106, + 424, + 504, + 538 + ], + "type": "image", + "image_path": "70f13531e69835c70feace1f0c5d4cb9f407929b98d21c7bf77f8b7957c853c2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 539, + 504, + 654 + ], + "blocks": [ + { + "bbox": [ + 106, + 539, + 504, + 654 + ], + "lines": [ + { + "bbox": [ + 106, + 539, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 504, + 654 + ], + "type": "image", + "image_path": "dc9f11d9c6ca0344d00277395624f99e2b3066e6ca555339dae6e6a396995963.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 662, + 507, + 709 + ], + "lines": [ + { + "bbox": [ + 104, + 662, + 507, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 507, + 709 + ], + "type": "text", + "content": "Figure 10: Comparison of accuracy across different reasoning categories for human participants, one of the best performing proprietary models o1, the best performing Qwen-based open model 
Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama-3.2-90B-Vision-Instruct, measured on difficulty levels." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 504, + 208 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 504, + 208 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 504, + 208 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 504, + 208 + ], + "type": "image", + "image_path": "26d9b479b586179672f0a391c3433b4482c8e8deeca5515d3b7db271c2635940.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 209, + 504, + 337 + ], + "blocks": [ + { + "bbox": [ + 106, + 209, + 504, + 337 + ], + "lines": [ + { + "bbox": [ + 106, + 209, + 504, + 337 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 504, + 337 + ], + "type": "image", + "image_path": "2df83bc62266ef79c8c7d72021db90c5bdc93adf7f3d635def5ee2e1de1692b1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 338, + 504, + 467 + ], + "blocks": [ + { + "bbox": [ + 106, + 338, + 504, + 467 + ], + "lines": [ + { + "bbox": [ + 106, + 338, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 504, + 467 + ], + "type": "image", + "image_path": "932cbaf51c205a9c980fb43076c9fd7e28519b492074fde5af65adb7c67653ef.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 475, + 507, + 522 + ], + "lines": [ + { + "bbox": [ + 104, + 475, + 507, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 507, + 522 + ], + "type": "text", + "content": "Figure 11: Comparison of accuracy across different difficulty levels for human participants, one of the best performing proprietary models o1, the best performing Qwen-based open model Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama3.2-90B-Vision-Instruct, measured across reasoning categories." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 565 + ], + "type": "text", + "content": "when difficulty increases. Bridging the gap between model and human reasoning remains a critical challenge." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 578, + 294, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 294, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 294, + 590 + ], + "type": "text", + "content": "H.3 Option Types and Difficulty Levels" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 598, + 506, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 506, + 644 + ], + "type": "text", + "content": "Figure 12 compares human accuracy against three representative models, o1 (one of the best-performing proprietary models), Qwen2.5-VL-72B-Instruct (the strongest Qwen-based open model), and Llama-3.2-90B-Vision-Instruct (the strongest Llama-based open model), across different difficulty levels, separately for textual and visual answer options." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "content": "Across all participants and models, we observe a consistent pattern: text-based options result in higher accuracy than image-based options, with the performance gap widening as task difficulty increases. This trend holds even for human participants, whose accuracy drops from " + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "content": " on visual options when moving from easy to hard tasks, compared to a much smaller drop on text-based ones (" + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "inline_equation", + "content": "93\\%" + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "inline_equation", + "content": "73\\%" + }, + { + "bbox": [ + 104, + 648, + 504, + 704 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": "For models, the gap is even more pronounced. 
For instance, Qwen2.5-VL-72B-Instruct achieves " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "58\\%" + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": " accuracy on hard questions with text options, but only " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": " when image" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 304, + 182 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 304, + 182 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 304, + 182 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 304, + 182 + ], + "type": "image", + "image_path": "3737d6ab696a3c310dab5c98fb390eedc23b20fda6553c6b5310acdd5f1eabeb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 319, + 81, + 503, + 182 + ], + "blocks": [ + { + "bbox": [ + 319, + 81, + 503, + 182 + ], + "lines": [ + { + "bbox": [ + 319, + 81, + 503, + 182 + ], + "spans": [ + { + "bbox": [ + 319, + 81, + 503, + 182 + ], + "type": "image", + "image_path": "60a6175bfd05a6f7af089e7517aaca78aec0f7e4c49b2736be538da2e6e2dcfa.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 183, + 473, + 190 + ], + "lines": [ + { + "bbox": [ + 361, + 183, + 473, + 190 + ], + "spans": [ + { + "bbox": [ + 361, + 183, + 473, + 190 + ], + "type": "text", + "content": "Llama-3.2-90B-Vision-Instruct" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 190, + 302, + 285 + ], + "blocks": [ + { + "bbox": [ + 167, + 183, + 263, + 190 + ], + "lines": [ + { + "bbox": [ + 167, + 183, + 263, + 190 + ], + "spans": [ + { + "bbox": [ + 167, + 183, + 263, + 190 + ], + "type": "text", + "content": "Owen2.5-VL-72B-Instruct" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 190, + 302, + 285 + ], + "lines": [ + { + "bbox": [ + 106, + 190, + 302, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 190, + 302, + 285 + ], + "type": "image", + "image_path": "89b0feaa5116c74ddcd31055f2b342bfdeeb7a6e14a35f8811b249750b39baac.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 293, + 506, + 339 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 339 + ], + "type": "text", + "content": "Figure 12: Comparison of accuracy across different difficulty levels for human participants, one of the best performing proprietary model o1, the best performing Qwen-based open model 
Qwen2.5-VL-72B-Instruct, and the best performing Llama-based open model Llama3.2-90B-Vision-Instruct, measured on textual v.s. visual option types." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 320, + 191, + 503, + 285 + ], + "blocks": [ + { + "bbox": [ + 320, + 191, + 503, + 285 + ], + "lines": [ + { + "bbox": [ + 320, + 191, + 503, + 285 + ], + "spans": [ + { + "bbox": [ + 320, + 191, + 503, + 285 + ], + "type": "image", + "image_path": "ece4b18303218047608cad7c13202f459437b64ddffa0465d71243302f485618.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 358, + 506, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 506, + 437 + ], + "type": "text", + "content": "options are used. o1 and Llama-3.2-90B-Vision-Instruct exhibit similar drops, suggesting a broad weakness in multi-image reasoning and visual option discrimination. These findings suggest that image-based answer options introduce significant additional complexity, requiring models not just to understand the question but to reason over multiple visual cues. This capability is essential for real-world tasks such as product selection, recommendation, and visual planning, where their decision-making process often depends on comparing visual content." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 441, + 506, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 506, + 521 + ], + "type": "text", + "content": "However, most pretraining datasets and benchmarks have traditionally emphasized textual QA formats, with far fewer examples involving visual options or structured visual comparisons. As a result, models may lack the inductive bias or learned attention mechanisms to handle visual alternatives effectively. These results highlight an important direction for future work: expanding and diversifying training corpora to include multi-choice visual reasoning tasks, and developing architectures that are explicitly designed to process and compare visual candidates, especially under challenging conditions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 533, + 249, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 249, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 249, + 547 + ], + "type": "text", + "content": "H.4 Case Study of Reasoning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 553, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 553, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 553, + 506, + 588 + ], + "type": "text", + "content": "Figure 13 shows a case study demonstrating the similarity in structure and reasoning strategy between Claude-3.7-Sonnet and Claude-3.7-Sonnet-Thinking. Average textual similarity between model responses of these two models on VISUALPUZZLES is 0.9." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 601, + 200, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 200, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 200, + 613 + ], + "type": "text", + "content": "H.5 Impact of CoT" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "type": "text", + "content": "Table 11 compares model performance under two prompting strategies: direct multiple-choice prompt vs. Chain-of-Thought (CoT) prompt. We observe that proprietary models and larger open models " + }, + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "type": "inline_equation", + "content": "(\\geq 72\\mathrm{B})" + }, + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "type": "text", + "content": " benefit from CoT, while others show little to no improvement or even a decline in performance with CoT. For instance, both GPT-4o and Qwen2.5-VL-72B-Instruct show more than " + }, + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 621, + 284, + 732 + ], + "type": "text", + "content": " in" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 288, + 592, + 509, + 714 + ], + "blocks": [ + { + "bbox": [ + 288, + 592, + 509, + 714 + ], + "lines": [ + { + "bbox": [ + 288, + 592, + 509, + 714 + ], + "spans": [ + { + "bbox": [ + 288, + 592, + 509, + 714 + ], + "type": "table", + "html": "
ModelDirectCoT
GPT-4o34.041.6
Gemini-1.5-Pro41.045.1
Claude-3.5-Sonnet40.042.5
Qwen2-VL-2B-Instruct31.326.1
Qwen2.5-VL-7B-Instruct33.732.0
Cambrian-13B27.426.5
LLaVA-NeXT-34B29.829.6
Qwen2.5-VL-72B-Instruct38.642.3
LLama-3.2-90B-Vision-Instruct33.333.9
", + "image_path": "999904e34e7035b4b23684e5a2a42c123a5cad6b8ceb3076290e805a48da6d9f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 288, + 715, + 507, + 739 + ], + "lines": [ + { + "bbox": [ + 288, + 715, + 507, + 739 + ], + "spans": [ + { + "bbox": [ + 288, + 715, + 507, + 739 + ], + "type": "text", + "content": "Table 11: Comparison of models with Direct Multiple Choice and CoT prompting." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 81, + 504, + 345 + ], + "blocks": [ + { + "bbox": [ + 106, + 81, + 504, + 345 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 504, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 504, + 345 + ], + "type": "table", + "html": "
Question: Given a pair of chairs arranged at a specific angle, \nselect the option from the four provided that matches the angle of \nthe chairs in the question. (Answer: C)Claude-3.7-Sonnet (With Thinking) \nLet me analyze this problem carefully.
The question shows a “question pair” of chairs at the top of the image, and I need to find which of the four options (A, B, C, D) matches \nthe angle arrangement of those chairs.
Let me examine the question pair first:
- The question pair shows two folding chairs that appear to be facing inward toward each other at approximately a 90-degree angle (the \nchairs are at roughly right angles to each other).
- The chairs in the question pair are shown separated by a colon “:” symbol.
Claude-3.7-Sonnet (Without Thinking) \nI need to analyze the angle relationship between the pair of chairs \nin the question and compare it with the four options provided. \nLooking at the question image at the top: \n- The first chair (on the left) is shown at an angle, with its side \nprofile partially visible \n- The second chair (on the right) is shown more from the front, \nfacing more directly toward the viewer \n- The chairs are positioned at roughly a 90-degree angle to each \nother \nNow examining each option: \nOption A: Two simple chairs with backs. The left chair is shown \nfrom a side angle, while the right chair is shown more from the \nfront. Their orientation appears to be at approximately a 90-degree \nangle to each other. \nOption B: Two backless chairs/stools. The left chair has an angled \ndesign and is shown from the side, while the right chair is shown \nmore straight-on. The angle between them doesn’t appear to \nmatch the 90-degree relationship in the question. \nOption C: Two office/drafting chairs with wheels. Both chairs are \nshown from similar angles, with only slight variation in their \norientation. They don’t appear to have the same 90-degree \nrelationship as the question chairs. \nOption D: Two bar stools with round seats and circular bases. Both \nare shown from similar angles with minimal difference in orientation. \nThey don’t demonstrate the 90-degree relationship seen in the \nquestion. \nComparing all options to the question image, Option A most \nclosely matches the angular relationship between the two chairs in \nthe question. \nAnswer: ANow let me examine each option: \nOption A: \n- Shows two simple chairs with backrests \n- The chairs are facing each other at approximately a 90-degree angle \n- The arrangement looks similar to the question pair in terms of the angle
Option B: \n- Shows two chairs with different designs - one appears to be a more angled/tilted chair and the other is more upright \n- They don’t appear to be at the same angle relationship as the question pair
Option C: \n- Shows two office/drafting chairs with wheels and height adjustment \n- The chairs appear to be facing each other at approximately a 90-degree angle \n- The arrangement looks similar to the question pair in terms of angle
Option D: \n- Shows two bar-style chairs with round seats and central pedestals \n- The chairs appear to be directly facing each other (180-degree relationship) \n- This doesn’t match the angle relationship in the question pair
Based on the angle relationship, both Options A and C show chairs at approximately a 90-degree angle to each other, similar to the \nquestion pair. However, Option A’s chair style and arrangement seems closer to the question pair’s folding chairs.
So, looking at the angle between the chairs, which is the key aspect of this question, Options A and C both show approximately \n90-degree angles between chairs, while Option B shows chairs at different angles, and Option D shows chairs directly facing each other. \nSince the question specifically asks about the angle match, I need to determine which option best matches the exact angle of the \nquestion pair.
Looking closely again: \nThe question pair shows chairs that are facing slightly inward, at approximately 90 degrees. \nOption A shows a similar angle relationship of roughly 90 degrees. \nOption C also shows a similar angle relationship of roughly 90 degrees. \nThis is a bit challenging with the 2D representation, but I think Option A most closely matches the angle relationship in the question pair.
Answer: A
", + "image_path": "c86427df64f338c67429e5c9fd24fa20e14af7ab99cc71ca9cd42b04000c044d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 348, + 504, + 381 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 381 + ], + "type": "text", + "content": "Figure 13: Case Study showing the similarity in structure and reasoning strategy between Claude-3.7-Sonnet-Thinking and Claude-3.7-Sonnet. Similarity between model responses of these two models on VISUALPUZZLES is 0.9." + } + ] + } + ], + "index": 2, + "type": "text" + }, + { + "bbox": [ + 104, + 403, + 506, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 506, + 460 + ], + "type": "text", + "content": "creases in performance when using CoT. In contrast, several smaller models, such as Qwen2-VL-2B-Instruct and Cambrian-13B, exhibit decreased accuracy with CoT prompting. These results suggest that CoT can indeed enhance the reasoning capability of larger models whereas it may introduce unnecessary complexity or confusion for smaller models and thus decreasing performance." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 187, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 187, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 187, + 95 + ], + "type": "text", + "content": "I Case Study" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 257, + 119, + 349, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 119, + 349, + 133 + ], + "spans": [ + { + "bbox": [ + 257, + 119, + 349, + 133 + ], + "type": "text", + "content": "Algorithmic (Easy)" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 365, + 120, + 377, + 131 + ], + "blocks": [ + { + "bbox": [ + 365, + 120, + 377, + 131 + ], + "lines": [ + { + "bbox": [ + 365, + 120, + 377, + 131 + ], + "spans": [ + { + "bbox": [ + 365, + 120, + 377, + 131 + ], + "type": "image", + "image_path": "424714a384e3ef3140a976266073aef801742fb243abbc70eecfcc5dd6030446.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 139, + 250, + 213 + ], + "blocks": [ + { + "bbox": [ + 106, + 139, + 250, + 213 + ], + "lines": [ + { + "bbox": [ + 106, + 139, + 250, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 139, + 250, + 213 + ], + "type": "image", + "image_path": "6ff5da0dfd71ea7192bbc4b256b148718f303bd5f12d4f39bba8c9a93445e556.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 704, + 470, + 718 + ], + "lines": [ + { + "bbox": [ + 138, + 704, + 470, + 718 
+ ], + "spans": [ + { + "bbox": [ + 138, + 704, + 470, + 718 + ], + "type": "text", + "content": "Figure 14: A sample error case of Algorithmic Reasoning (difficulty: Easy)." + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 257, + 152, + 493, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 152, + 493, + 173 + ], + "spans": [ + { + "bbox": [ + 257, + 152, + 493, + 173 + ], + "type": "text", + "content": "Question: There are seven squares in the picture. How many more triangles than squares are there in the picture?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 258, + 174, + 372, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 174, + 372, + 184 + ], + "spans": [ + { + "bbox": [ + 258, + 174, + 372, + 184 + ], + "type": "text", + "content": "Options: (A) 1 (B) 2 (C) 3 (D) 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 258, + 189, + 329, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 189, + 329, + 200 + ], + "spans": [ + { + "bbox": [ + 258, + 189, + 329, + 200 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 219, + 501, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 219, + 501, + 240 + ], + "spans": [ + { + "bbox": [ + 108, + 219, + 501, + 240 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: I need to count the number of squares and triangles in the given picture, and then find how many more triangles there are than squares." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 110, + 249, + 223, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 249, + 223, + 258 + ], + "spans": [ + { + "bbox": [ + 110, + 249, + 223, + 258 + ], + "type": "text", + "content": "First, let me count the squares:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 110, + 259, + 418, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 259, + 418, + 270 + ], + "spans": [ + { + "bbox": [ + 110, + 259, + 418, + 270 + ], + "type": "text", + "content": "1. I can see 7 small squares arranged in a triangular pattern within the large triangle." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 110, + 278, + 228, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 278, + 228, + 289 + ], + "spans": [ + { + "bbox": [ + 110, + 278, + 228, + 289 + ], + "type": "text", + "content": "Now, let me count the triangles:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 289, + 324, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 289, + 324, + 299 + ], + "spans": [ + { + "bbox": [ + 110, + 289, + 324, + 299 + ], + "type": "text", + "content": "1. There's the large outer triangle that contains everything." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 110, + 299, + 424, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 299, + 424, + 309 + ], + "spans": [ + { + "bbox": [ + 110, + 299, + 424, + 309 + ], + "type": "text", + "content": "2. There are small triangles formed by the division of the large triangle by the squares." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 110, + 319, + 469, + 369 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 110, + 319, + 272, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 319, + 272, + 329 + ], + "spans": [ + { + "bbox": [ + 110, + 319, + 272, + 329 + ], + "type": "text", + "content": "Let me count these small triangles carefully:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 329, + 423, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 329, + 423, + 338 + ], + "spans": [ + { + "bbox": [ + 110, + 329, + 423, + 338 + ], + "type": "text", + "content": "- Along the bottom row, I see 4 triangles (formed by the squares and the bottom edge)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 339, + 457, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 339, + 457, + 348 + ], + "spans": [ + { + "bbox": [ + 110, + 339, + 457, + 348 + ], + "type": "text", + "content": "- On the left side of the large triangle, I see 3 triangles (formed by the squares and the left edge)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 110, + 349, + 469, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 349, + 469, + 358 + ], + "spans": [ + { + "bbox": [ + 110, + 349, + 469, + 358 + ], + "type": "text", + "content": "- On the right side of the large triangle, I see 3 triangles (formed by the squares and the right edge)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 110, + 359, + 304, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 359, + 304, + 369 + ], + "spans": [ + { + "bbox": [ + 110, + 359, + 304, + 369 + ], + "type": "text", + "content": "- Don't forget the large outer triangle itself: 1 triangle" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 108, + 377, + 247, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 377, + 247, + 388 + ], + "spans": [ + { + "bbox": [ + 108, + 377, + 247, + 388 + ], + "type": "text", + "content": "So in total: " + }, + { + "bbox": [ + 108, + 377, + 247, + 388 + ], + "type": "inline_equation", + "content": "4 + 3 + 3 + 1 = 11" + }, + { + "bbox": [ + 108, + 377, + 247, + 388 + ], + "type": "text", + "content": " triangles" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 108, + 397, + 220, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 397, + 220, + 407 + ], + "spans": [ + { + "bbox": [ + 108, + 397, + 220, + 407 + ], + "type": "text", + "content": "Now I can find the difference:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 110, + 407, + 304, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 407, + 304, + 418 + ], + "spans": [ + { + "bbox": [ + 110, + 407, + 304, + 418 + ], + "type": "text", + "content": "Number of triangles - Number of squares " + }, + { + "bbox": [ + 110, + 407, + 304, + 418 + ], + "type": "inline_equation", + "content": "= 11 - 7 = 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 108, + 427, + 316, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 427, + 316, + 438 + ], + "spans": [ + { + "bbox": [ + 108, + 427, + 316, + 438 + ], + "type": "text", + "content": "So there are 4 more triangles than squares in the picture." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 108, + 447, + 268, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 447, + 268, + 457 + ], + "spans": [ + { + "bbox": [ + 108, + 447, + 268, + 457 + ], + "type": "text", + "content": "Looking at the options, the answer is (D) 4." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 110, + 457, + 170, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 457, + 170, + 467 + ], + "spans": [ + { + "bbox": [ + 110, + 457, + 170, + 467 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 109, + 467, + 496, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 467, + 496, + 478 + ], + "spans": [ + { + "bbox": [ + 109, + 467, + 496, + 478 + ], + "type": "text", + "content": "Looking at the image, I need to count the total number of squares and triangles, then find their difference." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 109, + 487, + 209, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 487, + 209, + 496 + ], + "spans": [ + { + "bbox": [ + 109, + 487, + 209, + 496 + ], + "type": "text", + "content": "First, I'll count the squares:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 109, + 497, + 413, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 497, + 413, + 507 + ], + "spans": [ + { + "bbox": [ + 109, + 497, + 413, + 507 + ], + "type": "text", + "content": "- I can see 7 small squares arranged in a triangular pattern inside the large triangle." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 109, + 516, + 215, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 516, + 215, + 526 + ], + "spans": [ + { + "bbox": [ + 109, + 516, + 215, + 526 + ], + "type": "text", + "content": "Next, I'll count the triangles:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 110, + 527, + 376, + 567 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 110, + 527, + 327, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 527, + 327, + 536 + ], + "spans": [ + { + "bbox": [ + 110, + 527, + 327, + 536 + ], + "type": "text", + "content": "1. There is the large outer triangle that contains everything." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 110, + 537, + 376, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 537, + 376, + 546 + ], + "spans": [ + { + "bbox": [ + 110, + 537, + 376, + 546 + ], + "type": "text", + "content": "2. Along the bottom edge of the large triangle, there are 4 small triangles." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 110, + 547, + 362, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 547, + 362, + 556 + ], + "spans": [ + { + "bbox": [ + 110, + 547, + 362, + 556 + ], + "type": "text", + "content": "3. Along the left edge of the large triangle, there are 3 small triangles." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 110, + 557, + 368, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 557, + 368, + 567 + ], + "spans": [ + { + "bbox": [ + 110, + 557, + 368, + 567 + ], + "type": "text", + "content": "4. Along the right edge of the large triangle, there are 3 small triangles." 
+ } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 109, + 575, + 308, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 575, + 308, + 586 + ], + "spans": [ + { + "bbox": [ + 109, + 575, + 308, + 586 + ], + "type": "text", + "content": "Total number of triangles " + }, + { + "bbox": [ + 109, + 575, + 308, + 586 + ], + "type": "inline_equation", + "content": "= 1 + 4 + 3 + 3 = 11" + }, + { + "bbox": [ + 109, + 575, + 308, + 586 + ], + "type": "text", + "content": " triangles" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 109, + 595, + 364, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 595, + 364, + 605 + ], + "spans": [ + { + "bbox": [ + 109, + 595, + 364, + 605 + ], + "type": "text", + "content": "Now I can calculate how many more triangles than squares there are:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 110, + 606, + 262, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 606, + 262, + 616 + ], + "spans": [ + { + "bbox": [ + 110, + 606, + 262, + 616 + ], + "type": "text", + "content": "11 triangles - 7 squares = 4 more triangles" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 110, + 625, + 151, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 625, + 151, + 634 + ], + "spans": [ + { + "bbox": [ + 110, + 625, + 151, + 634 + ], + "type": "text", + "content": "Answer: D" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 110, + 639, + 231, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 639, + 231, + 649 + ], + "spans": [ + { + "bbox": [ + 110, + 639, + 231, + 649 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 109, + 649, + 501, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 649, + 501, + 689 + ], + "spans": [ + { + "bbox": [ + 109, + 649, + 501, + 689 + ], + "type": "text", + "content": "Error Reason: The model gets the answer wrong because it miscounts the number of triangles, specifically double-counting the triangles formed by small triangles and squares. The response incorrectly conclude there are 11 triangles, but a careful manual inspection shows only 10 distinct triangles, making the correct difference 3, not 4." 
+ } + ] + } + ], + "index": 40 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 42 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 257, + 88, + 348, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 88, + 348, + 102 + ], + "spans": [ + { + "bbox": [ + 257, + 88, + 348, + 102 + ], + "type": "text", + "content": "Algorithmic (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "61f9deaf7571114247539cebe087ca5c23f67840862b57d8b656849458a44f08.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 115, + 127, + 166, + 171 + ], + "blocks": [ + { + "bbox": [ + 115, + 127, + 166, + 171 + ], + "lines": [ + { + "bbox": [ + 115, + 127, + 166, + 171 + ], + "spans": [ + { + "bbox": [ + 115, + 127, + 166, + 171 + ], + "type": "image", + "image_path": "7461112c409fd02eba1473e6933495500624a55d6357cb99ddfdfec1549ea9f6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 723, + 474, + 736 + ], + "lines": [ + { + "bbox": [ + 135, + 723, + 474, + 736 + ], + "spans": [ + { + "bbox": [ + 135, + 723, + 474, + 736 + ], + "type": "text", + "content": "Figure 15: A sample correct case of Algorithmic Reasoning (difficulty: Easy)." 
+ } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 187, + 118, + 223, + 171 + ], + "blocks": [ + { + "bbox": [ + 175, + 161, + 186, + 171 + ], + "lines": [ + { + "bbox": [ + 175, + 161, + 186, + 171 + ], + "spans": [ + { + "bbox": [ + 175, + 161, + 186, + 171 + ], + "type": "text", + "content": "(B)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 187, + 118, + 223, + 171 + ], + "lines": [ + { + "bbox": [ + 187, + 118, + 223, + 171 + ], + "spans": [ + { + "bbox": [ + 187, + 118, + 223, + 171 + ], + "type": "image", + "image_path": "fb31c8ae8c6c1b5b7fe9668564add2294271e6b74599c26aa0c6c7eea16db2e7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 235, + 118, + 288, + 171 + ], + "blocks": [ + { + "bbox": [ + 235, + 118, + 288, + 171 + ], + "lines": [ + { + "bbox": [ + 235, + 118, + 288, + 171 + ], + "spans": [ + { + "bbox": [ + 235, + 118, + 288, + 171 + ], + "type": "image", + "image_path": "2d3a21ad802b33bf71c70e0abd227d85a77a0230e56bf9bc88272a39454b0e56.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 304, + 118, + 343, + 171 + ], + "blocks": [ + { + "bbox": [ + 294, + 161, + 305, + 171 + ], + "lines": [ + { + "bbox": [ + 294, + 161, + 305, + 171 + ], + "spans": [ + { + "bbox": [ + 294, + 161, + 305, + 171 + ], + "type": "text", + "content": "(D)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 118, + 343, + 171 + ], + "lines": [ + { + "bbox": [ + 304, + 118, + 343, + 171 + ], + "spans": [ + { + "bbox": [ + 304, + 118, + 343, + 171 + ], + "type": "image", + "image_path": "b3960c57aa37463f6a6aae7ee509937f01607f72a797a14974a5afe562bad84c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 347, + 112, + 485, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 112, + 485, + 160 + ], + "spans": [ + { + "bbox": [ + 347, + 112, + 485, + 160 + ], + "type": "text", + "content": "Question: In the Kangaroo constellation, all stars have a number greater than 3 and their sum is 20. Which option shows the Kangaroo constellation?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 348, + 165, + 416, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 165, + 416, + 176 + ], + "spans": [ + { + "bbox": [ + 348, + 165, + 416, + 176 + ], + "type": "text", + "content": "Ground Truth: (B)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 186, + 496, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 186, + 496, + 204 + ], + "spans": [ + { + "bbox": [ + 113, + 186, + 496, + 204 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: The question asks about the Kangaroo constellation, which has the following properties:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 204, + 233, + 219 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 114, + 204, + 233, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 204, + 233, + 212 + ], + "spans": [ + { + "bbox": [ + 114, + 204, + 233, + 212 + ], + "type": "text", + "content": "1. 
All stars have a number greater than 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 212, + 206, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 212, + 206, + 219 + ], + "spans": [ + { + "bbox": [ + 114, + 212, + 206, + 219 + ], + "type": "text", + "content": "2. The sum of all numbers is 20" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 227, + 315, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 227, + 315, + 236 + ], + "spans": [ + { + "bbox": [ + 113, + 227, + 315, + 236 + ], + "type": "text", + "content": "Let me check each option to see which one satisfies these conditions:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 243, + 149, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 243, + 149, + 251 + ], + "spans": [ + { + "bbox": [ + 113, + 243, + 149, + 251 + ], + "type": "text", + "content": "Option (A):" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 252, + 193, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 252, + 193, + 259 + ], + "spans": [ + { + "bbox": [ + 114, + 252, + 193, + 259 + ], + "type": "text", + "content": "Stars have values: 5, 4, 7, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 260, + 281, + 274 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 114, + 260, + 281, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 260, + 281, + 267 + ], + "spans": [ + { + "bbox": [ + 114, + 260, + 281, + 267 + ], + "type": "text", + "content": "- Are all numbers greater than 3? Yes (5, 4, 7, 6 are all " + }, + { + "bbox": [ + 114, + 260, + 281, + 267 + ], + "type": "inline_equation", + "content": ">3" + }, + { + "bbox": [ + 114, + 260, + 281, + 267 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 268, + 186, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 268, + 186, + 274 + ], + "spans": [ + { + "bbox": [ + 114, + 268, + 186, + 274 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 268, + 186, + 274 + ], + "type": "inline_equation", + "content": "5 + 4 + 7 + 6 = {22}" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 275, + 239, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 275, + 239, + 283 + ], + "spans": [ + { + "bbox": [ + 114, + 275, + 239, + 283 + ], + "type": "text", + "content": "This doesn't match the required sum of 20." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 291, + 148, + 299 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 291, + 148, + 299 + ], + "spans": [ + { + "bbox": [ + 113, + 291, + 148, + 299 + ], + "type": "text", + "content": "Option (B):" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 300, + 274, + 322 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 114, + 300, + 186, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 300, + 186, + 306 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 186, + 306 + ], + "type": "text", + "content": "Stars have values: 5, 8, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 308, + 274, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 308, + 274, + 315 + ], + "spans": [ + { + "bbox": [ + 114, + 308, + 274, + 315 + ], + "type": "text", + "content": "- Are all numbers greater than 3? Yes (5, 8, 7 are all " + }, + { + "bbox": [ + 114, + 308, + 274, + 315 + ], + "type": "inline_equation", + "content": ">3" + }, + { + "bbox": [ + 114, + 308, + 274, + 315 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 316, + 175, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 316, + 175, + 322 + ], + "spans": [ + { + "bbox": [ + 114, + 316, + 175, + 322 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 316, + 175, + 322 + ], + "type": "inline_equation", + "content": "5 + 8 + 7 = {20}" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 323, + 351, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 323, + 351, + 331 + ], + "spans": [ + { + "bbox": [ + 113, + 323, + 351, + 331 + ], + "type": "text", + "content": "This matches both conditions: all numbers are greater than 3, and they sum to 20." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 338, + 148, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 338, + 148, + 346 + ], + "spans": [ + { + "bbox": [ + 113, + 338, + 148, + 346 + ], + "type": "text", + "content": "Option (C):" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 347, + 199, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 347, + 199, + 354 + ], + "spans": [ + { + "bbox": [ + 114, + 347, + 199, + 354 + ], + "type": "text", + "content": "Stars have values: 5, 3, 7, 2, 8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 354, + 281, + 369 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 114, + 354, + 281, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 354, + 281, + 362 + ], + "spans": [ + { + "bbox": [ + 114, + 354, + 281, + 362 + ], + "type": "text", + "content": "- Are all numbers greater than 3? 
No (3 and 2 are not " + }, + { + "bbox": [ + 114, + 354, + 281, + 362 + ], + "type": "inline_equation", + "content": ">3" + }, + { + "bbox": [ + 114, + 354, + 281, + 362 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 363, + 195, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 363, + 195, + 369 + ], + "spans": [ + { + "bbox": [ + 114, + 363, + 195, + 369 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 363, + 195, + 369 + ], + "type": "inline_equation", + "content": "5 + 3 + 7 + 2 + 8 = {25}" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 370, + 361, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 370, + 361, + 379 + ], + "spans": [ + { + "bbox": [ + 114, + 370, + 361, + 379 + ], + "type": "text", + "content": "This doesn't satisfy the first condition about all stars having numbers greater than 3." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 386, + 148, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 386, + 148, + 394 + ], + "spans": [ + { + "bbox": [ + 113, + 386, + 148, + 394 + ], + "type": "text", + "content": "Option (D):" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 395, + 192, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 395, + 192, + 401 + ], + "spans": [ + { + "bbox": [ + 114, + 395, + 192, + 401 + ], + "type": "text", + "content": "Stars have values: 5, 1, 4, 9" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 402, + 259, + 426 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 114, + 402, + 259, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 402, + 259, + 410 + ], + "spans": [ + { + "bbox": [ + 114, + 402, + 259, + 410 + ], + "type": "text", + "content": "- Are all numbers greater than 3? No (1 is not " + }, + { + "bbox": [ + 114, + 402, + 259, + 410 + ], + "type": "inline_equation", + "content": ">3" + }, + { + "bbox": [ + 114, + 402, + 259, + 410 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 411, + 186, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 411, + 186, + 417 + ], + "spans": [ + { + "bbox": [ + 114, + 411, + 186, + 417 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 411, + 186, + 417 + ], + "type": "inline_equation", + "content": "5 + 1 + 4 + 9 = {19}" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 418, + 220, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 418, + 220, + 426 + ], + "spans": [ + { + "bbox": [ + 114, + 418, + 220, + 426 + ], + "type": "text", + "content": "This doesn't satisfy either condition." + } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 434, + 332, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 332, + 441 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 332, + 441 + ], + "type": "text", + "content": "So, only Option (B) satisfies both conditions of the Kangaroo constellation." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 442, + 162, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 442, + 162, + 449 + ], + "spans": [ + { + "bbox": [ + 114, + 442, + 162, + 449 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 449, + 251, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 449, + 251, + 457 + ], + "spans": [ + { + "bbox": [ + 114, + 449, + 251, + 457 + ], + "type": "text", + "content": "Solving the Kangaroo Constellation Problem" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 464, + 388, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 464, + 388, + 474 + ], + "spans": [ + { + "bbox": [ + 113, + 464, + 388, + 474 + ], + "type": "text", + "content": "I need to identify the constellation where all stars have numbers greater than 3 and sum to 20." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 480, + 199, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 480, + 199, + 490 + ], + "spans": [ + { + "bbox": [ + 113, + 480, + 199, + 490 + ], + "type": "text", + "content": "Let me examine each option:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 113, + 497, + 157, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 497, + 157, + 505 + ], + "spans": [ + { + "bbox": [ + 113, + 497, + 157, + 505 + ], + "type": "text", + "content": "Option (A)" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 506, + 156, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 506, + 156, + 512 + ], + "spans": [ + { + "bbox": [ + 114, + 506, + 156, + 512 + ], + "type": "text", + "content": "Stars: 5, 4, 7, 6" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 514, + 185, + 527 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 114, + 514, + 181, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 181, + 520 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 181, + 520 + ], + "type": "text", + "content": "- All numbers " + }, + { + "bbox": [ + 114, + 514, + 181, + 520 + ], + "type": "inline_equation", + "content": "> 3" + }, + { + "bbox": [ + 114, + 514, + 181, + 520 + ], + "type": "text", + "content": "? Yes" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 521, + 185, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 521, + 185, + 527 + ], + "spans": [ + { + "bbox": [ + 114, + 521, + 185, + 527 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 521, + 185, + 527 + ], + "type": "inline_equation", + "content": "5 + 4 + 7 + 6 = {22}" + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 529, + 226, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 529, + 226, + 536 + ], + "spans": [ + { + "bbox": [ + 114, + 529, + 226, + 536 + ], + "type": "text", + "content": "Doesn't match the required sum of 20." 
+ } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 544, + 156, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 544, + 156, + 552 + ], + "spans": [ + { + "bbox": [ + 113, + 544, + 156, + 552 + ], + "type": "text", + "content": "Option (B)" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 553, + 149, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 149, + 559 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 149, + 559 + ], + "type": "text", + "content": "Stars: 5, 8, 7" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 561, + 180, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 561, + 180, + 567 + ], + "spans": [ + { + "bbox": [ + 114, + 561, + 180, + 567 + ], + "type": "text", + "content": "- All numbers " + }, + { + "bbox": [ + 114, + 561, + 180, + 567 + ], + "type": "inline_equation", + "content": "> 3" + }, + { + "bbox": [ + 114, + 561, + 180, + 567 + ], + "type": "text", + "content": "? Yes" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 568, + 175, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 568, + 175, + 574 + ], + "spans": [ + { + "bbox": [ + 114, + 568, + 175, + 574 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 568, + 175, + 574 + ], + "type": "inline_equation", + "content": "5 + 8 + 7 = {20}" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 576, + 197, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 576, + 197, + 584 + ], + "spans": [ + { + "bbox": [ + 114, + 576, + 197, + 584 + ], + "type": "text", + "content": "Matches both requirements!" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 592, + 156, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 592, + 156, + 600 + ], + "spans": [ + { + "bbox": [ + 114, + 592, + 156, + 600 + ], + "type": "text", + "content": "Option (C)" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 601, + 162, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 601, + 162, + 607 + ], + "spans": [ + { + "bbox": [ + 114, + 601, + 162, + 607 + ], + "type": "text", + "content": "Stars: 5, 3, 7, 2, 8" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 608, + 227, + 623 + ], + "type": "list", + "angle": 0, + "index": 60, + "blocks": [ + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "spans": [ + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "type": "text", + "content": "- All numbers " + }, + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "type": "inline_equation", + "content": "> 3" + }, + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "type": "text", + "content": "? 
No (3 and 2 are " + }, + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "type": "inline_equation", + "content": "\\leq 3" + }, + { + "bbox": [ + 114, + 608, + 227, + 615 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 616, + 196, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 196, + 623 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 196, + 623 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 616, + 196, + 623 + ], + "type": "inline_equation", + "content": "5 + 3 + 7 + 2 + 8 = 25" + } + ] + } + ], + "index": 59 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 624, + 213, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 624, + 213, + 632 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 213, + 632 + ], + "type": "text", + "content": "Doesn't satisfy the first condition." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 113, + 639, + 157, + 648 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 639, + 157, + 648 + ], + "spans": [ + { + "bbox": [ + 113, + 639, + 157, + 648 + ], + "type": "text", + "content": "Option (D)" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 649, + 156, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 649, + 156, + 655 + ], + "spans": [ + { + "bbox": [ + 114, + 649, + 156, + 655 + ], + "type": "text", + "content": "Stars: 5, 1, 4, 9" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "spans": [ + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "type": "text", + "content": "- All numbers " + }, + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "type": "inline_equation", + "content": "> 3" + }, + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "type": "text", + "content": "? No (1 is " + }, + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "type": "inline_equation", + "content": "\\leq 3" + }, + { + "bbox": [ + 114, + 656, + 205, + 663 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 664, + 185, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 664, + 185, + 670 + ], + "spans": [ + { + "bbox": [ + 114, + 664, + 185, + 670 + ], + "type": "text", + "content": "- Sum: " + }, + { + "bbox": [ + 114, + 664, + 185, + 670 + ], + "type": "inline_equation", + "content": "5 + 1 + 4 + 9 = {19}" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 671, + 207, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 671, + 207, + 679 + ], + "spans": [ + { + "bbox": [ + 114, + 671, + 207, + 679 + ], + "type": "text", + "content": "Doesn't satisfy either condition." + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 113, + 686, + 434, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 686, + 434, + 696 + ], + "spans": [ + { + "bbox": [ + 113, + 686, + 434, + 696 + ], + "type": "text", + "content": "Only option (B) satisfies both requirements: all stars have numbers greater than 3, and their sum is exactly 20." 
+ } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 114, + 703, + 146, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 703, + 146, + 710 + ], + "spans": [ + { + "bbox": [ + 114, + 703, + 146, + 710 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 68 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 70 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 248, + 88, + 357, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 88, + 357, + 102 + ], + "spans": [ + { + "bbox": [ + 248, + 88, + 357, + 102 + ], + "type": "text", + "content": "Algorithmic (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 362, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 362, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 362, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 362, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "00e50a1568abcd54465fc320e5e371cbde25a42d012dacc8fef4c74e1687b8cd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 114, + 125, + 334, + 147 + ], + "blocks": [ + { + "bbox": [ + 114, + 125, + 334, + 147 + ], + "lines": [ + { + "bbox": [ + 114, + 125, + 334, + 147 + ], + "spans": [ + { + "bbox": [ + 114, + 125, + 334, + 147 + ], + "type": "image", + "image_path": "33212890af4e16181bfc285e43be24966ae928823fec25db8cb92706b2348fe9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 723, + 479, + 736 + ], + "lines": [ + { + "bbox": [ + 130, + 723, + 479, + 736 + ], + "spans": [ + { + "bbox": [ + 130, + 723, + 479, + 736 + ], + "type": "text", + "content": "Figure 16: A sample error case of Algorithmic Reasoning (difficulty: Medium)." + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 348, + 113, + 484, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 113, + 484, + 133 + ], + "spans": [ + { + "bbox": [ + 348, + 113, + 484, + 133 + ], + "type": "text", + "content": "Question: How many left hands appear in the image?" 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 350, + 133, + 460, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 133, + 460, + 143 + ], + "spans": [ + { + "bbox": [ + 350, + 133, + 460, + 143 + ], + "type": "text", + "content": "Options: (A) 1 (B) 8 (C) 4 (D) 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 349, + 148, + 417, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 349, + 148, + 417, + 159 + ], + "spans": [ + { + "bbox": [ + 349, + 148, + 417, + 159 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 174, + 252, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 174, + 252, + 183 + ], + "spans": [ + { + "bbox": [ + 113, + 174, + 252, + 183 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: " + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 183, + 495, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 183, + 495, + 202 + ], + "spans": [ + { + "bbox": [ + 113, + 183, + 495, + 202 + ], + "type": "text", + "content": "Looking at the image, I can see approximately 10-12 hand icons/emoji in a row. They're all yellow (with one that appears to be skin-colored), and they show various hand gestures." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 209, + 495, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 209, + 495, + 228 + ], + "spans": [ + { + "bbox": [ + 113, + 209, + 495, + 228 + ], + "type": "text", + "content": "To determine which ones are left hands, I need to look at the thumb position. In a left hand, the thumb would typically be on the right side when the palm is facing forward." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 236, + 268, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 236, + 268, + 246 + ], + "spans": [ + { + "bbox": [ + 113, + 236, + 268, + 246 + ], + "type": "text", + "content": "Let me go through each hand from left to right:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 254, + 457, + 351 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 113, + 254, + 365, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 254, + 365, + 262 + ], + "spans": [ + { + "bbox": [ + 113, + 254, + 365, + 262 + ], + "type": "text", + "content": "1. Thumbs down gesture - appears to be a left hand with thumb on right side" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 263, + 388, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 263, + 388, + 271 + ], + "spans": [ + { + "bbox": [ + 114, + 263, + 388, + 271 + ], + "type": "text", + "content": "2. Fist or closed hand - orientation is harder to determine but seems to be a left hand" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 272, + 298, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 272, + 298, + 280 + ], + "spans": [ + { + "bbox": [ + 114, + 272, + 298, + 280 + ], + "type": "text", + "content": "3. Open hand or waving hand - appears to be a left hand" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 281, + 298, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 281, + 298, + 289 + ], + "spans": [ + { + "bbox": [ + 114, + 281, + 298, + 289 + ], + "type": "text", + "content": "4. 
Hand pointing or gesturing - appears to be a left hand" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 289, + 310, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 289, + 310, + 298 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 310, + 298 + ], + "type": "text", + "content": "5. Open hand with fingers spread - appears to be a left hand" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 298, + 457, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 298, + 457, + 307 + ], + "spans": [ + { + "bbox": [ + 114, + 298, + 457, + 307 + ], + "type": "text", + "content": "6. Fist with extended thumb (like \"thumbs up\" but horizontal) - appears to be a right hand (thumb on left)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 308, + 310, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 308, + 310, + 316 + ], + "spans": [ + { + "bbox": [ + 114, + 308, + 310, + 316 + ], + "type": "text", + "content": "7. Open hand with fingers spread - appears to be a left hand" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 316, + 297, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 316, + 297, + 324 + ], + "spans": [ + { + "bbox": [ + 114, + 316, + 297, + 324 + ], + "type": "text", + "content": "8. Hand making \"OK\" gesture - appears to be a left hand" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 325, + 288, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 325, + 288, + 333 + ], + "spans": [ + { + "bbox": [ + 114, + 325, + 288, + 333 + ], + "type": "text", + "content": "9. Victory/peace sign hand - appears to be a left hand" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 334, + 225, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 334, + 225, + 342 + ], + "spans": [ + { + "bbox": [ + 114, + 334, + 225, + 342 + ], + "type": "text", + "content": "10. Ear or side of face (not a hand)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 342, + 306, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 342, + 306, + 351 + ], + "spans": [ + { + "bbox": [ + 114, + 342, + 306, + 351 + ], + "type": "text", + "content": "11. Hand pointing or gesturing - appears to be a right hand" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 359, + 435, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 359, + 435, + 369 + ], + "spans": [ + { + "bbox": [ + 113, + 359, + 435, + 369 + ], + "type": "text", + "content": "So counting the left hands, I see approximately 8 left hands. This would correspond to option (B) 8." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 376, + 495, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 376, + 495, + 395 + ], + "spans": [ + { + "bbox": [ + 113, + 376, + 495, + 395 + ], + "type": "text", + "content": "However, I want to be careful because determining left vs. right can sometimes be ambiguous in emoji or simplistic hand representations. Let me double check my count." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 403, + 495, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 403, + 495, + 430 + ], + "spans": [ + { + "bbox": [ + 113, + 403, + 495, + 430 + ], + "type": "text", + "content": "Looking again, I see the thumbs down, several open hands, some gesture hands, and a few that are definitely left hands based on thumb position. The total count of left hands appears to be around 8, though there's some ambiguity with a couple of the representations." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 438, + 288, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 438, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 113, + 438, + 288, + 448 + ], + "type": "text", + "content": "Based on my analysis, the answer appears to be (B) 8." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 449, + 167, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 449, + 167, + 457 + ], + "spans": [ + { + "bbox": [ + 114, + 449, + 167, + 457 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 457, + 443, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 457, + 443, + 466 + ], + "spans": [ + { + "bbox": [ + 113, + 457, + 443, + 466 + ], + "type": "text", + "content": "To solve this question, I need to examine the image carefully and identify which hands are left hands." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 473, + 495, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 473, + 495, + 501 + ], + "spans": [ + { + "bbox": [ + 113, + 473, + 495, + 501 + ], + "type": "text", + "content": "Looking at the row of yellow hand emojis, I need to determine whether each is a left or right hand. For hand emojis, I can identify a left hand by noticing the thumb position - on a left hand, the thumb will typically appear on the right side when the palm is facing forward." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 510, + 257, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 510, + 257, + 518 + ], + "spans": [ + { + "bbox": [ + 113, + 510, + 257, + 518 + ], + "type": "text", + "content": "Going through each emoji from left to right:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 519, + 361, + 615 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 114, + 519, + 306, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 519, + 306, + 527 + ], + "spans": [ + { + "bbox": [ + 114, + 519, + 306, + 527 + ], + "type": "text", + "content": "1. Thumbs down - this is a left hand (thumb is on the right)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 528, + 220, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 528, + 220, + 535 + ], + "spans": [ + { + "bbox": [ + 114, + 528, + 220, + 535 + ], + "type": "text", + "content": "2. Fist - appears to be a left hand" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 536, + 227, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 536, + 227, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 536, + 227, + 544 + ], + "type": "text", + "content": "3. 
Waving hand - this is a left hand" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 544, + 230, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 544, + 230, + 552 + ], + "spans": [ + { + "bbox": [ + 114, + 544, + 230, + 552 + ], + "type": "text", + "content": "4. Pointing hand - this is a left hand" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 553, + 285, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 285, + 561 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 285, + 561 + ], + "type": "text", + "content": "5. Open hand with fingers spread - this is a left hand" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 562, + 361, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 562, + 361, + 571 + ], + "spans": [ + { + "bbox": [ + 114, + 562, + 361, + 571 + ], + "type": "text", + "content": "6. Fist with extended thumb - this appears to be a right hand (thumb on left)" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 571, + 220, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 571, + 220, + 578 + ], + "spans": [ + { + "bbox": [ + 114, + 571, + 220, + 578 + ], + "type": "text", + "content": "7. Open hand - this is a left hand" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 579, + 239, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 579, + 239, + 587 + ], + "spans": [ + { + "bbox": [ + 114, + 579, + 239, + 587 + ], + "type": "text", + "content": "8. OK gesture hand - this is a left hand" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 588, + 236, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 588, + 236, + 597 + ], + "spans": [ + { + "bbox": [ + 114, + 588, + 236, + 597 + ], + "type": "text", + "content": "9. Peace sign hand - this is a left hand" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 597, + 198, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 597, + 198, + 605 + ], + "spans": [ + { + "bbox": [ + 114, + 597, + 198, + 605 + ], + "type": "text", + "content": "10. Ear emoji (not a hand)" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 606, + 219, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 606, + 219, + 615 + ], + "spans": [ + { + "bbox": [ + 114, + 606, + 219, + 615 + ], + "type": "text", + "content": "11. Pointing gesture - right hand" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 623, + 237, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 623, + 237, + 633 + ], + "spans": [ + { + "bbox": [ + 113, + 623, + 237, + 633 + ], + "type": "text", + "content": "I count 8 left hands total in the image." 
+ } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 640, + 149, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 640, + 149, + 649 + ], + "spans": [ + { + "bbox": [ + 113, + 640, + 149, + 649 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 113, + 661, + 269, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 661, + 269, + 670 + ], + "spans": [ + { + "bbox": [ + 113, + 661, + 269, + 670 + ], + "type": "text", + "content": "Error Category: Spatial / Orientation Error" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 113, + 671, + 495, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 671, + 495, + 709 + ], + "spans": [ + { + "bbox": [ + 113, + 671, + 495, + 709 + ], + "type": "text", + "content": "Error Reason: The model makes a orientation error by misinterpreting the thumb positions in several hand emojis, leading it to incorrectly classify some right hands as left hands. For example, it counts the pointing hand in the third emoji (which is clearly a right hand) as a left hand. This misclassification results in an overcount of left hands." + } + ] + } + ], + "index": 46 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 48 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 248, + 88, + 357, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 88, + 357, + 102 + ], + "spans": [ + { + "bbox": [ + 248, + 88, + 357, + 102 + ], + "type": "text", + "content": "Algorithmic (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 127, + 106, + 218, + 198 + ], + "blocks": [ + { + "bbox": [ + 127, + 106, + 218, + 198 + ], + "lines": [ + { + "bbox": [ + 127, + 106, + 218, + 198 + ], + "spans": [ + { + "bbox": [ + 127, + 106, + 218, + 198 + ], + "type": "image", + "image_path": "1ab5d39c309739928dbdbad3db6a7908d5d62c04378b3c3dfc2525e0114c5e01.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 723, + 482, + 736 + ], + "lines": [ + { + "bbox": [ + 126, + 723, + 482, + 736 + ], + "spans": [ + { + "bbox": [ + 126, + 723, + 482, + 736 + ], + "type": "text", + "content": "Figure 17: A sample correct case of Algorithmic Reasoning (difficulty: Medium)." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 231, + 127, + 485, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 127, + 485, + 168 + ], + "spans": [ + { + "bbox": [ + 231, + 127, + 485, + 168 + ], + "type": "text", + "content": "Question: The entire pie seen in the image is divided among several children. Each child receives a piece of pie, and each piece of pie looks identical. What is the maximum possible number of children there? 
Options: (A) 9 (B) 8 (C) 10 (D) 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 233, + 178, + 301, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 178, + 301, + 190 + ], + "spans": [ + { + "bbox": [ + 233, + 178, + 301, + 190 + ], + "type": "text", + "content": "Ground Truth: (B)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 209, + 497, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 209, + 497, + 240 + ], + "spans": [ + { + "bbox": [ + 112, + 209, + 497, + 240 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: I need to determine how many equal-sized pieces this pie can be divided into. Since each child will receive an identical piece, I need to count the maximum number of equal portions visible in the image." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 112, + 248, + 495, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 248, + 495, + 269 + ], + "spans": [ + { + "bbox": [ + 112, + 248, + 495, + 269 + ], + "type": "text", + "content": "Looking at the image, I can see that this appears to be an unusual-looking pie with different colored sections arranged in a circular pattern. It's not a typical pie with simple wedge-shaped slices." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 277, + 495, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 277, + 495, + 308 + ], + "spans": [ + { + "bbox": [ + 112, + 277, + 495, + 308 + ], + "type": "text", + "content": "From what I can observe, there appears to be a pattern of colored shapes (green, light green, blue, and red) arranged in a circle. To determine how many equal pieces there are, I need to identify the repeating pattern and count how many times it repeats around the circle." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 112, + 316, + 495, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 316, + 495, + 346 + ], + "spans": [ + { + "bbox": [ + 112, + 316, + 495, + 346 + ], + "type": "text", + "content": "The pie seems to have a symmetrical arrangement. Looking carefully, I can see that there appear to be 8 sections around the outer ring that follow a similar pattern. I can see repeated patterns of green and light green shapes with blue and red accents between them." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 355, + 198, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 355, + 198, + 365 + ], + "spans": [ + { + "bbox": [ + 113, + 355, + 198, + 365 + ], + "type": "text", + "content": "Looking at the options:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 365, + 137, + 403 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 114, + 365, + 134, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 365, + 134, + 374 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 134, + 374 + ], + "type": "text", + "content": "(A) 9" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 375, + 132, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 375, + 132, + 384 + ], + "spans": [ + { + "bbox": [ + 115, + 375, + 132, + 384 + ], + "type": "text", + "content": "(B) 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 384, + 137, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 384, + 137, + 394 + ], + "spans": [ + { + "bbox": [ + 115, + 384, + 137, + 394 + ], + "type": "text", + "content": "(C) 10" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 395, + 133, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 395, + 133, + 403 + ], + "spans": [ + { + "bbox": [ + 115, + 395, + 133, + 403 + ], + "type": "text", + "content": "(D) 4" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 413, + 495, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 413, + 495, + 433 + ], + "spans": [ + { + "bbox": [ + 113, + 413, + 495, + 433 + ], + "type": "text", + "content": "Based on my observation of the image, there appear to be 8 similar sections arranged around the circle. This would suggest that option (B) 8 is the correct answer." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 112, + 441, + 495, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 441, + 495, + 463 + ], + "spans": [ + { + "bbox": [ + 112, + 441, + 495, + 463 + ], + "type": "text", + "content": "However, I should double-check by looking for any other patterns or symmetries in the image. It's possible that each section could be further divided, or that there's a different way to interpret the equal pieces." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 112, + 471, + 495, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 471, + 495, + 491 + ], + "spans": [ + { + "bbox": [ + 112, + 471, + 495, + 491 + ], + "type": "text", + "content": "After careful examination, I'm confident that there are 8 identical sections arranged in a circular pattern. Therefore, the maximum number of children who could receive identical pieces of pie would be 8." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 491, + 172, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 491, + 172, + 501 + ], + "spans": [ + { + "bbox": [ + 114, + 491, + 172, + 501 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 501, + 295, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 501, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 501, + 295, + 510 + ], + "type": "text", + "content": "Determining the Maximum Number of Children" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 112, + 519, + 495, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 519, + 495, + 539 + ], + "spans": [ + { + "bbox": [ + 112, + 519, + 495, + 539 + ], + "type": "text", + "content": "To solve this problem, I need to examine the pie in the image and determine how many identical pieces it can be divided into." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 548, + 194, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 548, + 194, + 558 + ], + "spans": [ + { + "bbox": [ + 113, + 548, + 194, + 558 + ], + "type": "text", + "content": "Analysis of the Pie" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 112, + 559, + 495, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 559, + 495, + 578 + ], + "spans": [ + { + "bbox": [ + 112, + 559, + 495, + 578 + ], + "type": "text", + "content": "Looking at the image, I can see a circular pie with a pattern of colored sections. The key is to identify how many identical sections are present or could be created." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 112, + 587, + 495, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 587, + 495, + 616 + ], + "spans": [ + { + "bbox": [ + 112, + 587, + 495, + 616 + ], + "type": "text", + "content": "Upon careful examination, I notice that the pie has a symmetrical arrangement with a repeating pattern. The colorful segments (green, light green, red, and blue shapes) appear to form a consistent pattern around the circle." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 112, + 626, + 495, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 626, + 495, + 647 + ], + "spans": [ + { + "bbox": [ + 112, + 626, + 495, + 647 + ], + "type": "text", + "content": "Counting these repeating sections carefully, I can identify 8 identical portions around the circumference of the pie. Each of these sections follows the same pattern of colored shapes." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 655, + 168, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 655, + 168, + 664 + ], + "spans": [ + { + "bbox": [ + 113, + 655, + 168, + 664 + ], + "type": "text", + "content": "Conclusion" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 112, + 665, + 495, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 665, + 495, + 685 + ], + "spans": [ + { + "bbox": [ + 112, + 665, + 495, + 685 + ], + "type": "text", + "content": "Since each child must receive an identical piece, and there are 8 identical sections visible in the pie, the maximum possible number of children would be 8." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 693, + 153, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 693, + 153, + 703 + ], + "spans": [ + { + "bbox": [ + 113, + 693, + 153, + 703 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 211, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 256, + 88, + 349, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 88, + 349, + 102 + ], + "spans": [ + { + "bbox": [ + 256, + 88, + 349, + 102 + ], + "type": "text", + "content": "Algorithmic (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "927d08a0f0f6c9898c84461b46688bcabf55b744090097079b653255cfc913a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 130, + 106, + 214, + 195 + ], + "blocks": [ + { + "bbox": [ + 130, + 106, + 214, + 195 + ], + "lines": [ + { + "bbox": [ + 130, + 106, + 214, + 195 + ], + "spans": [ + { + "bbox": [ + 130, + 106, + 214, + 195 + ], + "type": "image", + "image_path": "090c5d20e7dbcee00cc16fc3ac0259c5d9be49d302ec9a733870fb9c076fd3e8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "lines": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "spans": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "type": "text", + "content": "Figure 18: A sample error case of Algorithmic Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 70, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 224, + 121, + 484, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 121, + 484, + 140 + ], + "spans": [ + { + "bbox": [ + 224, + 121, + 484, + 140 + ], + "type": "text", + "content": "Question: There are 53 residents living in the apartment. How many people live on the second floor?" 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 225, + 141, + 345, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 141, + 345, + 152 + ], + "spans": [ + { + "bbox": [ + 225, + 141, + 345, + 152 + ], + "type": "text", + "content": "Options: (A) 5 (B) 8 (C) 10 (D) 13" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 225, + 156, + 294, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 156, + 294, + 168 + ], + "spans": [ + { + "bbox": [ + 225, + 156, + 294, + 168 + ], + "type": "text", + "content": "Ground Truth: (A)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 196, + 497, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 196, + 497, + 210 + ], + "spans": [ + { + "bbox": [ + 113, + 196, + 497, + 210 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: This image shows an apartment building with different sections labeled with numbers. Let me analyze what the numbers represent:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 214, + 324, + 245 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 113, + 214, + 222, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 214, + 222, + 221 + ], + "spans": [ + { + "bbox": [ + 113, + 214, + 222, + 221 + ], + "type": "text", + "content": "- The building appears to have multiple floors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 221, + 257, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 221, + 257, + 228 + ], + "spans": [ + { + "bbox": [ + 114, + 221, + 257, + 228 + ], + "type": "text", + "content": "- The number 27 is shown on the left side of one section (blue)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 228, + 324, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 228, + 324, + 233 + ], + "spans": [ + { + "bbox": [ + 114, + 228, + 324, + 233 + ], + "type": "text", + "content": "- The number 23 is shown on the right side of a section (also seems to cover two blue floors)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 233, + 297, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 233, + 297, + 239 + ], + "spans": [ + { + "bbox": [ + 114, + 233, + 297, + 239 + ], + "type": "text", + "content": "- The number 18 is shown on the left side of a pink/red section marked with \"?\"" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 239, + 257, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 239, + 257, + 245 + ], + "spans": [ + { + "bbox": [ + 114, + 239, + 257, + 245 + ], + "type": "text", + "content": "- The number 13 is shown on the right side of the bottom floor." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 251, + 454, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 251, + 454, + 259 + ], + "spans": [ + { + "bbox": [ + 113, + 251, + 454, + 259 + ], + "type": "text", + "content": "The question asks how many people live on the second floor, but I need to determine which floor is the second floor and how many people live there." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 263, + 214, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 263, + 214, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 263, + 214, + 270 + ], + "type": "text", + "content": "Looking at the building from bottom to top:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 270, + 271, + 295 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 114, + 270, + 263, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 270, + 263, + 276 + ], + "spans": [ + { + "bbox": [ + 114, + 270, + 263, + 276 + ], + "type": "text", + "content": "1. Bottom floor (blue with windows): labeled with 13 on the right" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 276, + 256, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 276, + 256, + 282 + ], + "spans": [ + { + "bbox": [ + 114, + 276, + 256, + 282 + ], + "type": "text", + "content": "2. Second floor (pink/red with \"?\"): labeled with 18 on the left" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 282, + 271, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 282, + 271, + 289 + ], + "spans": [ + { + "bbox": [ + 114, + 282, + 271, + 289 + ], + "type": "text", + "content": "3. Two middle floors (blue with windows): part of the 23 on the right" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 289, + 251, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 289, + 251, + 295 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 251, + 295 + ], + "type": "text", + "content": "4. Top floor (blue with windows): labeled with 27 on the left" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 300, + 497, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 300, + 497, + 314 + ], + "spans": [ + { + "bbox": [ + 113, + 300, + 497, + 314 + ], + "type": "text", + "content": "So it seems the second floor from the bottom is the one marked with " + }, + { + "bbox": [ + 113, + 300, + 497, + 314 + ], + "type": "inline_equation", + "content": "?^{\\prime \\prime}" + }, + { + "bbox": [ + 113, + 300, + 497, + 314 + ], + "type": "text", + "content": " and has 18 on the left side. But the question asks specifically how many people live on the second floor, not how many are associated with the label." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 319, + 354, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 319, + 354, + 327 + ], + "spans": [ + { + "bbox": [ + 113, + 319, + 354, + 327 + ], + "type": "text", + "content": "Let me think about this more carefully. The total number of residents is 53. 
The numbers shown might be:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 327, + 249, + 350 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 114, + 327, + 204, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 327, + 204, + 332 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 204, + 332 + ], + "type": "text", + "content": "- 27: Number of people on the top floor" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 332, + 249, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 332, + 249, + 338 + ], + "spans": [ + { + "bbox": [ + 114, + 332, + 249, + 338 + ], + "type": "text", + "content": "- 23: Number of people on the two middle floors combined" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 338, + 244, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 338, + 244, + 345 + ], + "spans": [ + { + "bbox": [ + 114, + 338, + 244, + 345 + ], + "type": "text", + "content": "- 18: Number of people on the second floor" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 345, + 213, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 345, + 213, + 350 + ], + "spans": [ + { + "bbox": [ + 114, + 345, + 213, + 350 + ], + "type": "text", + "content": "- 13: Number of people on the bottom floor" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 356, + 280, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 356, + 280, + 363 + ], + "spans": [ + { + "bbox": [ + 113, + 356, + 280, + 363 + ], + "type": "text", + "content": "Let's check if these add up: " + }, + { + "bbox": [ + 113, + 356, + 280, + 363 + ], + "type": "inline_equation", + "content": "27 + 23 + 18 + 13 = 81" + }, + { + "bbox": [ + 113, + 356, + 280, + 363 + ], + "type": "text", + "content": ", which is more than 53." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 369, + 260, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 369, + 260, + 376 + ], + "spans": [ + { + "bbox": [ + 113, + 369, + 260, + 376 + ], + "type": "text", + "content": "Wait, I think I'm misinterpreting. 
The numbers might represent:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 376, + 214, + 400 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 114, + 376, + 197, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 376, + 197, + 381 + ], + "spans": [ + { + "bbox": [ + 114, + 376, + 197, + 381 + ], + "type": "text", + "content": "- 27: People on left side (top section)" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 381, + 214, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 381, + 214, + 388 + ], + "spans": [ + { + "bbox": [ + 114, + 381, + 214, + 388 + ], + "type": "text", + "content": "- 23: People on right side (upper two floors)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 388, + 203, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 388, + 203, + 394 + ], + "spans": [ + { + "bbox": [ + 114, + 388, + 203, + 394 + ], + "type": "text", + "content": "- 18: People on left side (lower section)" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 394, + 205, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 394, + 205, + 400 + ], + "spans": [ + { + "bbox": [ + 114, + 394, + 205, + 400 + ], + "type": "text", + "content": "- 13: People on right side (bottom floor)" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "spans": [ + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "text", + "content": "So " + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "inline_equation", + "content": "27 + 18 = 45" + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "text", + "content": " people on the left, and " + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "inline_equation", + "content": "23 + 13 = 36" + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "text", + "content": " people on the right. That's still " + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "inline_equation", + "content": "45 + 36 = 81" + }, + { + "bbox": [ + 113, + 405, + 402, + 413 + ], + "type": "text", + "content": " people, which doesn't match 53." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 418, + 496, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 418, + 496, + 432 + ], + "spans": [ + { + "bbox": [ + 113, + 418, + 496, + 432 + ], + "type": "text", + "content": "Let me try a different approach. Maybe the 23 represents the number of people on the right side of the building (covering multiple floors), and 18 represents people on another part." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 436, + 239, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 436, + 239, + 443 + ], + "spans": [ + { + "bbox": [ + 113, + 436, + 239, + 443 + ], + "type": "text", + "content": "Actually, looking at the diagram again, it appears that:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 443, + 201, + 467 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 114, + 443, + 187, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 443, + 187, + 449 + ], + "spans": [ + { + "bbox": [ + 114, + 443, + 187, + 449 + ], + "type": "text", + "content": "- 27: Left side of the top section" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 449, + 201, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 449, + 201, + 455 + ], + "spans": [ + { + "bbox": [ + 114, + 449, + 201, + 455 + ], + "type": "text", + "content": "- 23: Right side of the top two sections" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 455, + 194, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 455, + 194, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 455, + 194, + 461 + ], + "type": "text", + "content": "- 18: Left side of the bottom section" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 461, + 198, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 461, + 198, + 467 + ], + "spans": [ + { + "bbox": [ + 114, + 461, + 198, + 467 + ], + "type": "text", + "content": "- 13: Right side of the bottom section" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 473, + 411, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 473, + 411, + 481 + ], + "spans": [ + { + "bbox": [ + 113, + 473, + 411, + 481 + ], + "type": "text", + "content": "Given the total is 53 residents, and we're trying to find the number on the second floor, let's try to interpret the diagram differently." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 485, + 225, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 485, + 225, + 492 + ], + "spans": [ + { + "bbox": [ + 113, + 485, + 225, + 492 + ], + "type": "text", + "content": "The first (bottom) floor seems to have 13 people." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 492, + 195, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 492, + 195, + 498 + ], + "spans": [ + { + "bbox": [ + 114, + 492, + 195, + 498 + ], + "type": "text", + "content": "The top appears to have 27 people." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 498, + 288, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 498, + 288, + 505 + ], + "spans": [ + { + "bbox": [ + 114, + 498, + 288, + 505 + ], + "type": "text", + "content": "That leaves " + }, + { + "bbox": [ + 114, + 498, + 288, + 505 + ], + "type": "inline_equation", + "content": "53 - 13 - 27 = 13" + }, + { + "bbox": [ + 114, + 498, + 288, + 505 + ], + "type": "text", + "content": " people for the second and third floors combined." 
+ } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 113, + 510, + 496, + 536 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 113, + 510, + 496, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 510, + 496, + 524 + ], + "spans": [ + { + "bbox": [ + 113, + 510, + 496, + 524 + ], + "type": "text", + "content": "I'm still uncertain, but based on my calculations and the available options, it seems like (D) 13 might be the answer if we interpret the second floor as having all the remaining residents after accounting for the top and bottom floors." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 524, + 152, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 524, + 152, + 530 + ], + "spans": [ + { + "bbox": [ + 114, + 524, + 152, + 530 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 530, + 215, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 530, + 215, + 536 + ], + "spans": [ + { + "bbox": [ + 114, + 530, + 215, + 536 + ], + "type": "text", + "content": "Solving the Apartment Residents Problem" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 540, + 374, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 540, + 374, + 548 + ], + "spans": [ + { + "bbox": [ + 113, + 540, + 374, + 548 + ], + "type": "text", + "content": "Looking at the image, I need to determine how many residents live on the second floor of this apartment building." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 113, + 553, + 241, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 553, + 241, + 559 + ], + "spans": [ + { + "bbox": [ + 113, + 553, + 241, + 559 + ], + "type": "text", + "content": "I can see a diagram of a building with several numbers:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 559, + 303, + 591 + ], + "type": "list", + "angle": 0, + "index": 58, + "blocks": [ + { + "bbox": [ + 114, + 559, + 207, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 559, + 207, + 566 + ], + "spans": [ + { + "bbox": [ + 114, + 559, + 207, + 566 + ], + "type": "text", + "content": "- The top section has \"27\" on the left side" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 566, + 229, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 566, + 229, + 572 + ], + "spans": [ + { + "bbox": [ + 114, + 566, + 229, + 572 + ], + "type": "text", + "content": "- The middle two floors have \"23\" on the right side" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 572, + 220, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 572, + 220, + 578 + ], + "spans": [ + { + "bbox": [ + 114, + 572, + 220, + 578 + ], + "type": "text", + "content": "- There's a section marked with \"18\" on the left." 
+ } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 578, + 215, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 578, + 215, + 585 + ], + "spans": [ + { + "bbox": [ + 114, + 578, + 215, + 585 + ], + "type": "text", + "content": "- The bottom floor has \"13\" on the right side" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 585, + 303, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 585, + 303, + 591 + ], + "spans": [ + { + "bbox": [ + 114, + 585, + 303, + 591 + ], + "type": "text", + "content": "- The second floor from the bottom is marked with a \"?\" - this is what I need to find" + } + ] + } + ], + "index": 57 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 597, + 312, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 597, + 312, + 605 + ], + "spans": [ + { + "bbox": [ + 113, + 597, + 312, + 605 + ], + "type": "text", + "content": "I notice the question states there are 53 residents total living in the apartment building." + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 113, + 609, + 164, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 609, + 164, + 615 + ], + "spans": [ + { + "bbox": [ + 113, + 609, + 164, + 615 + ], + "type": "text", + "content": "Let me work this out:" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 616, + 299, + 634 + ], + "type": "list", + "angle": 0, + "index": 64, + "blocks": [ + { + "bbox": [ + 114, + 616, + 214, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 214, + 621 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 214, + 621 + ], + "type": "text", + "content": "- The top floor appears to have 27 residents" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 621, + 223, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 621, + 223, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 621, + 223, + 628 + ], + "type": "text", + "content": "- The bottom floor appears to have 13 residents" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 628, + 299, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 299, + 634 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 299, + 634 + ], + "type": "text", + "content": "- That means the middle two floors together must have: " + }, + { + "bbox": [ + 114, + 628, + 299, + 634 + ], + "type": "inline_equation", + "content": "{53} - {27} - {13} = {13}" + }, + { + "bbox": [ + 114, + 628, + 299, + 634 + ], + "type": "text", + "content": " residents" + } + ] + } + ], + "index": 63 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 639, + 435, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 639, + 435, + 647 + ], + "spans": [ + { + "bbox": [ + 113, + 639, + 435, + 647 + ], + "type": "text", + "content": "Looking closely at the image, the second floor is specifically marked with a \"?\" and there are numbers on the sides indicating 13 for that floor." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 113, + 651, + 215, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 651, + 215, + 658 + ], + "spans": [ + { + "bbox": [ + 113, + 651, + 215, + 658 + ], + "type": "text", + "content": "Therefore, the second floor has 13 residents." 
+ } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 114, + 664, + 140, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 664, + 140, + 670 + ], + "spans": [ + { + "bbox": [ + 114, + 664, + 140, + 670 + ], + "type": "text", + "content": "Answer: D" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 113, + 673, + 249, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 249, + 683 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 249, + 683 + ], + "type": "text", + "content": "Error Category: Understanding Error" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 113, + 683, + 497, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 683, + 497, + 712 + ], + "spans": [ + { + "bbox": [ + 113, + 683, + 497, + 712 + ], + "type": "text", + "content": "Error Reason: The model makes an image understanding error by misunderstanding what is the meaning of the brackets and numbers, despite all the branching and revalidation. This misunderstanding causes it to iteratively try to understand the meaning of the numbers and ultimately select the wrong answer." + } + ] + } + ], + "index": 69 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 71 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 256, + 88, + 348, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 88, + 348, + 102 + ], + "spans": [ + { + "bbox": [ + 256, + 88, + 348, + 102 + ], + "type": "text", + "content": "Algorithmic (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "734da95c949dd5902eb030ed4246a550a978e842247fed2f79fc705914fc1123.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 125, + 108, + 194, + 176 + ], + "blocks": [ + { + "bbox": [ + 125, + 108, + 194, + 176 + ], + "lines": [ + { + "bbox": [ + 125, + 108, + 194, + 176 + ], + "spans": [ + { + "bbox": [ + 125, + 108, + 194, + 176 + ], + "type": "image", + "image_path": "85d2f65accb24a4a7760e618083811214a75b10243babb7aedceef3ce06aab19.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 134, + 723, + 476, + 736 + ], + "lines": [ + { + "bbox": [ + 134, + 723, + 476, + 736 + ], + "spans": [ + { + "bbox": [ + 134, + 723, + 476, + 736 + ], + "type": "text", + "content": "Figure 19: A sample correct case of Algorithmic Reasoning (difficulty: Hard)." 
+ } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 216, + 114, + 485, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 114, + 485, + 143 + ], + "spans": [ + { + "bbox": [ + 216, + 114, + 485, + 143 + ], + "type": "text", + "content": "Question: A bird can only move horizontally or vertically towards a monkey in the grid. The number of ways that the bird can reach the monkey is:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 217, + 144, + 345, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 144, + 345, + 155 + ], + "spans": [ + { + "bbox": [ + 217, + 144, + 345, + 155 + ], + "type": "text", + "content": "Options: (A) 15 (B) 14 (C) 18 (D) 12" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 217, + 161, + 287, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 161, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 217, + 161, + 287, + 172 + ], + "type": "text", + "content": "Ground Truth: (A)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 182, + 496, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 182, + 496, + 202 + ], + "spans": [ + { + "bbox": [ + 112, + 182, + 496, + 202 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Looking at the grid, I need to identify the positions of the bird and the monkey." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 112, + 209, + 497, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 209, + 497, + 228 + ], + "spans": [ + { + "bbox": [ + 112, + 209, + 497, + 228 + ], + "type": "text", + "content": "The bird appears to be at the top of the grid in the second column, specifically at position " + }, + { + "bbox": [ + 112, + 209, + 497, + 228 + ], + "type": "inline_equation", + "content": "(0,1)" + }, + { + "bbox": [ + 112, + 209, + 497, + 228 + ], + "type": "text", + "content": " if we use a zero-indexed grid counting from the top-left." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 236, + 469, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 236, + 469, + 246 + ], + "spans": [ + { + "bbox": [ + 113, + 236, + 469, + 246 + ], + "type": "text", + "content": "The monkey appears to be at the bottom-right of the grid, at position " + }, + { + "bbox": [ + 113, + 236, + 469, + 246 + ], + "type": "inline_equation", + "content": "(4,3)" + }, + { + "bbox": [ + 113, + 236, + 469, + 246 + ], + "type": "text", + "content": " using the same coordinate system." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 112, + 253, + 495, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 253, + 495, + 281 + ], + "spans": [ + { + "bbox": [ + 112, + 253, + 495, + 281 + ], + "type": "text", + "content": "To count the number of ways the bird can reach the monkey, I need to consider that the bird can only move horizontally or vertically. This means the bird can only move right or down to reach the monkey from its starting position." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 288, + 485, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 288, + 485, + 299 + ], + "spans": [ + { + "bbox": [ + 113, + 288, + 485, + 299 + ], + "type": "text", + "content": "The total number of valid paths will be the number of ways to arrange the required horizontal and vertical moves." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "spans": [ + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "type": "text", + "content": "From position " + }, + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "type": "inline_equation", + "content": "(0,1)" + }, + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "type": "inline_equation", + "content": "(4,3)" + }, + { + "bbox": [ + 113, + 306, + 285, + 315 + ], + "type": "text", + "content": ", the bird needs to move:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 113, + 316, + 253, + 334 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 113, + 316, + 233, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 316, + 233, + 324 + ], + "spans": [ + { + "bbox": [ + 113, + 316, + 233, + 324 + ], + "type": "text", + "content": "- 4 steps down (from row 0 to row 4)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 324, + 253, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 324, + 253, + 334 + ], + "spans": [ + { + "bbox": [ + 113, + 324, + 253, + 334 + ], + "type": "text", + "content": "- 2 steps right (from column 1 to column 3)" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 341, + 424, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 341, + 424, + 351 + ], + "spans": [ + { + "bbox": [ + 112, + 341, + 424, + 351 + ], + "type": "text", + "content": "So in total, the bird needs to make " + }, + { + "bbox": [ + 112, + 341, + 424, + 351 + ], + "type": "inline_equation", + "content": "4 + 2 = 6" + }, + { + "bbox": [ + 112, + 341, + 424, + 351 + ], + "type": "text", + "content": " moves, with 4 being vertical and 2 being horizontal." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 359, + 381, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 359, + 381, + 368 + ], + "spans": [ + { + "bbox": [ + 113, + 359, + 381, + 368 + ], + "type": "text", + "content": "The number of ways to arrange these moves is given by the combination formula:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 368, + 394, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 368, + 394, + 377 + ], + "spans": [ + { + "bbox": [ + 113, + 368, + 394, + 377 + ], + "type": "text", + "content": "C(total moves, horizontal moves) = C(6, 2) = 6! / (2! * 4!) 
= (6 * 5) / (2 * 1) = 30 / 2 = 15" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 385, + 353, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 385, + 353, + 395 + ], + "spans": [ + { + "bbox": [ + 113, + 385, + 353, + 395 + ], + "type": "text", + "content": "Alternatively, you can calculate " + }, + { + "bbox": [ + 113, + 385, + 353, + 395 + ], + "type": "inline_equation", + "content": "C(6, 4)" + }, + { + "bbox": [ + 113, + 385, + 353, + 395 + ], + "type": "text", + "content": " which would give the same result:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 395, + 197, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 395, + 197, + 403 + ], + "spans": [ + { + "bbox": [ + 113, + 395, + 197, + 403 + ], + "type": "interline_equation", + "content": "C (6, 4) = 6! / (4! * 2!) = 1 5", + "image_path": "6d90ec4e3b20979b3232678bb8d97c8893f3506db7b9b76a3694d3800717de81.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 411, + 318, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 411, + 318, + 422 + ], + "spans": [ + { + "bbox": [ + 113, + 411, + 318, + 422 + ], + "type": "text", + "content": "So there are 15 different ways for the bird to reach the monkey." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 429, + 302, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 429, + 302, + 438 + ], + "spans": [ + { + "bbox": [ + 113, + 429, + 302, + 438 + ], + "type": "text", + "content": "Looking at the options, this corresponds to answer (A) 15." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 438, + 167, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 438, + 167, + 447 + ], + "spans": [ + { + "bbox": [ + 113, + 438, + 167, + 447 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 447, + 221, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 447, + 221, + 456 + ], + "spans": [ + { + "bbox": [ + 113, + 447, + 221, + 456 + ], + "type": "text", + "content": "Solving the Grid Path Problem" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 112, + 464, + 497, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 464, + 497, + 483 + ], + "spans": [ + { + "bbox": [ + 112, + 464, + 497, + 483 + ], + "type": "text", + "content": "In this problem, I need to find the number of ways that a bird can reach a monkey by moving only horizontally or vertically in a grid." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 491, + 186, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 491, + 186, + 500 + ], + "spans": [ + { + "bbox": [ + 113, + 491, + 186, + 500 + ], + "type": "text", + "content": "Looking at the image:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 500, + 455, + 518 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 113, + 500, + 455, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 500, + 455, + 510 + ], + "spans": [ + { + "bbox": [ + 113, + 500, + 455, + 510 + ], + "type": "text", + "content": "- The bird is located at the top of the grid in the second column (position (0,1) if we start counting from 0)" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 510, + 320, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 510, + 320, + 518 + ], + "spans": [ + { + "bbox": [ + 113, + 510, + 320, + 518 + ], + "type": "text", + "content": "- The monkey is located at the bottom-right area (position (4,3))" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 526, + 249, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 526, + 249, + 535 + ], + "spans": [ + { + "bbox": [ + 113, + 526, + 249, + 535 + ], + "type": "text", + "content": "Step 1: Determine the required moves" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 536, + 288, + 562 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 113, + 536, + 288, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 536, + 288, + 544 + ], + "spans": [ + { + "bbox": [ + 113, + 536, + 288, + 544 + ], + "type": "text", + "content": "From the bird to the monkey, the bird needs to move:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 544, + 234, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 544, + 234, + 553 + ], + "spans": [ + { + "bbox": [ + 114, + 544, + 234, + 553 + ], + "type": "text", + "content": "- 4 steps down (from row 0 to row 4)" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 553, + 253, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 253, + 562 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 253, + 562 + ], + "type": "text", + "content": "- 2 steps right (from column 1 to column 3)" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 570, + 337, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 570, + 337, + 579 + ], + "spans": [ + { + "bbox": [ + 113, + 570, + 337, + 579 + ], + "type": "text", + "content": "So the bird must make a total of 6 moves: 4 vertical and 2 horizontal." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 587, + 276, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 587, + 276, + 597 + ], + "spans": [ + { + "bbox": [ + 113, + 587, + 276, + 597 + ], + "type": "text", + "content": "## Step 2: Calculate the number of possible paths" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 597, + 427, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 597, + 427, + 606 + ], + "spans": [ + { + "bbox": [ + 113, + 597, + 427, + 606 + ], + "type": "text", + "content": "This is a classic combinatorial problem. 
The number of different ways to arrange these moves is:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 614, + 289, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 614, + 289, + 624 + ], + "spans": [ + { + "bbox": [ + 113, + 614, + 289, + 624 + ], + "type": "text", + "content": "C(total moves, horizontal moves) = C(6,2) = 6! / (2! " + }, + { + "bbox": [ + 113, + 614, + 289, + 624 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 113, + 614, + 289, + 624 + ], + "type": "text", + "content": " 4!)" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 614, + 289, + 648 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 614, + 289, + 648 + ], + "spans": [ + { + "bbox": [ + 114, + 614, + 289, + 648 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} = (6 \\times 5) / (2 \\times 1) \\\\ = 3 0 / 2 \\\\ = 1 5 \\\\ \\end{array}", + "image_path": "f37da933fc159e721e2936c6b277bfbc2689af3de9753bc8def52900ab628793.jpg" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 113, + 658, + 335, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 658, + 335, + 668 + ], + "spans": [ + { + "bbox": [ + 113, + 658, + 335, + 668 + ], + "type": "text", + "content": "Alternatively, we could calculate " + }, + { + "bbox": [ + 113, + 658, + 335, + 668 + ], + "type": "inline_equation", + "content": "\\mathrm{C}(6,4)" + }, + { + "bbox": [ + 113, + 658, + 335, + 668 + ], + "type": "text", + "content": " which gives the same result." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 676, + 244, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 676, + 244, + 685 + ], + "spans": [ + { + "bbox": [ + 113, + 676, + 244, + 685 + ], + "type": "text", + "content": "## Step 3: Verify with the given options" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 685, + 237, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 685, + 237, + 694 + ], + "spans": [ + { + "bbox": [ + 113, + 685, + 237, + 694 + ], + "type": "text", + "content": "Our answer of 15 matches option (A)." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 702, + 151, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 702, + 151, + 711 + ], + "spans": [ + { + "bbox": [ + 114, + 702, + 151, + 711 + ], + "type": "text", + "content": "Answer: A" + } + ] + } + ], + "index": 43 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 45 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 261, + 89, + 345, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 89, + 345, + 102 + ], + "spans": [ + { + "bbox": [ + 261, + 89, + 345, + 102 + ], + "type": "text", + "content": "Analogical (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "e518d4acbe9c161cd715fed617ffb95611fdb16cf38907210ed5d5f595521048.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 126, + 108, + 149, + 133 + ], + "blocks": [ + { + "bbox": [ + 126, + 108, + 149, + 133 + ], + "lines": [ + { + "bbox": [ + 126, + 108, + 149, + 133 + ], + "spans": [ + { + "bbox": [ + 126, + 108, + 149, + 133 + ], + "type": "image", + "image_path": "50a2de794b432270d8a6b8a4ca03979f06543861e5246d77de2f2322303eec57.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 142, + 723, + 466, + 736 + ], + "lines": [ + { + "bbox": [ + 142, + 723, + 466, + 736 + ], + "spans": [ + { + "bbox": [ + 142, + 723, + 466, + 736 + ], + "type": "text", + "content": "Figure 20: A sample error case of Analogical Reasoning (difficulty: Easy)." 
+ } + ] + } + ], + "index": 85, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 126, + 133, + 148, + 155 + ], + "blocks": [ + { + "bbox": [ + 126, + 133, + 148, + 155 + ], + "lines": [ + { + "bbox": [ + 126, + 133, + 148, + 155 + ], + "spans": [ + { + "bbox": [ + 126, + 133, + 148, + 155 + ], + "type": "image", + "image_path": "a77154c58642491a23c268c4034f10ad4af28ba62a0d416a6bd8022d2dd274b2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 152, + 109, + 175, + 132 + ], + "blocks": [ + { + "bbox": [ + 152, + 109, + 175, + 132 + ], + "lines": [ + { + "bbox": [ + 152, + 109, + 175, + 132 + ], + "spans": [ + { + "bbox": [ + 152, + 109, + 175, + 132 + ], + "type": "image", + "image_path": "cb35d65666a6596b675ccdc24c9b37f588de9281849111d31cf59dc8561dd8a1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 154, + 133, + 174, + 155 + ], + "blocks": [ + { + "bbox": [ + 154, + 133, + 174, + 155 + ], + "lines": [ + { + "bbox": [ + 154, + 133, + 174, + 155 + ], + "spans": [ + { + "bbox": [ + 154, + 133, + 174, + 155 + ], + "type": "image", + "image_path": "22e8f2fd45414ea0c02565acbba730dd18db188566730e74d38f7073ac825006.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 176, + 109, + 200, + 132 + ], + "blocks": [ + { + "bbox": [ + 176, + 109, + 200, + 132 + ], + "lines": [ + { + "bbox": [ + 176, + 109, + 200, + 132 + ], + "spans": [ + { + "bbox": [ + 176, + 109, + 200, + 132 + ], + "type": "image", + "image_path": "2c357401c5509f0dfd36a58990216a6180a467958210780c06b3d61a2f2e3160.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 177, + 137, + 186, + 145 + ], + "blocks": [ + { + "bbox": [ + 177, + 137, + 186, + 145 + ], + "lines": [ + { + "bbox": [ + 177, + 137, + 186, + 145 + ], + "spans": [ + { + "bbox": [ + 177, + 137, + 186, + 145 + ], + "type": "image", + "image_path": "37364e6ffedfd53d5cf11984921f5c210b210bd629162fe84fa6477d697c9de7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 187, + 137, + 197, + 145 + ], + "blocks": [ + { + "bbox": [ + 187, + 137, + 197, + 145 + ], + "lines": [ + { + "bbox": [ + 187, + 137, + 197, + 145 + ], + "spans": [ + { + "bbox": [ + 187, + 137, + 197, + 145 + ], + "type": "image", + "image_path": "0920247e737549ac6c013dd4ad786a1d0456780992c80a82459f78cbac53edd4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 115, + 161, + 133, + 181 + ], + "blocks": [ + { + "bbox": [ + 115, + 161, + 133, + 181 + ], + "lines": [ + { + "bbox": [ + 115, + 161, + 133, + 181 + ], + "spans": [ + { + "bbox": [ + 115, + 161, + 133, + 181 + ], + "type": "image", + "image_path": "df192506b0a870a0c7543522cc6648f7a5cebc30bdfbff972c7e7ef764491a13.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 140, + 159, + 149, + 181 + ], + "blocks": [ + { + "bbox": [ + 140, + 159, + 149, + 181 + ], + "lines": [ + { + "bbox": [ + 140, + 159, + 149, + 181 + ], + "spans": [ + { + "bbox": [ + 140, + 159, + 149, + 181 + ], + "type": "image", + "image_path": 
"27c1ab9e19183598421b47bc2b2cde9cfa0eca5758064122469fefc4dd9aa1ec.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 152, + 159, + 174, + 181 + ], + "blocks": [ + { + "bbox": [ + 152, + 159, + 174, + 181 + ], + "lines": [ + { + "bbox": [ + 152, + 159, + 174, + 181 + ], + "spans": [ + { + "bbox": [ + 152, + 159, + 174, + 181 + ], + "type": "image", + "image_path": "b9bd0cb26aec3f688bb02c915e6e3c6a947257aafe514b16d19b7342827dc8a7.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 176, + 159, + 186, + 181 + ], + "blocks": [ + { + "bbox": [ + 176, + 159, + 186, + 181 + ], + "lines": [ + { + "bbox": [ + 176, + 159, + 186, + 181 + ], + "spans": [ + { + "bbox": [ + 176, + 159, + 186, + 181 + ], + "type": "image", + "image_path": "eb994d835f7c12f06003e9f62bc37518d1ff14925e962e33d938b47b4b9b5b19.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 194, + 159, + 214, + 181 + ], + "blocks": [ + { + "bbox": [ + 194, + 159, + 214, + 181 + ], + "lines": [ + { + "bbox": [ + 194, + 159, + 214, + 181 + ], + "spans": [ + { + "bbox": [ + 194, + 159, + 214, + 181 + ], + "type": "image", + "image_path": "d16e3f610ac02e622ac34890433e4efd1957e013f904088e7fb2cd385a5816c5.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 225, + 121, + 486, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 121, + 486, + 150 + ], + "spans": [ + { + "bbox": [ + 225, + 121, + 486, + 150 + ], + "type": "text", + "content": "Question: Given the pattern in the first line of blocks, which option at the bottom of the image fits in the question mark in the second line of blocks?" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 225, + 156, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 156, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 225, + 156, + 294, + 167 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 186, + 443, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 186, + 443, + 194 + ], + "spans": [ + { + "bbox": [ + 113, + 186, + 443, + 194 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the first line of blocks and see how it relates to the second line of blocks." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 199, + 159, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 199, + 159, + 205 + ], + "spans": [ + { + "bbox": [ + 113, + 199, + 159, + 205 + ], + "type": "text", + "content": "First line of blocks:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 206, + 249, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 206, + 249, + 212 + ], + "spans": [ + { + "bbox": [ + 114, + 206, + 249, + 212 + ], + "type": "text", + "content": "1. The first block has white on the left and blue on the right" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 213, + 246, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 213, + 246, + 219 + ], + "spans": [ + { + "bbox": [ + 114, + 213, + 246, + 219 + ], + "type": "text", + "content": "2. 
The second block has white on the top and blue on the" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 219, + 251, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 219, + 251, + 225 + ], + "spans": [ + { + "bbox": [ + 114, + 219, + 251, + 225 + ], + "type": "text", + "content": "3. The third block has blue on the left and white on the right" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 231, + 165, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 231, + 165, + 236 + ], + "spans": [ + { + "bbox": [ + 114, + 231, + 165, + 236 + ], + "type": "text", + "content": "Second line of blocks:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 237, + 282, + 255 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 114, + 237, + 260, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 237, + 260, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 237, + 260, + 243 + ], + "type": "text", + "content": "1. The first block has black on the top and yellow on the bottom" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 243, + 261, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 243, + 261, + 250 + ], + "spans": [ + { + "bbox": [ + 114, + 243, + 261, + 250 + ], + "type": "text", + "content": "2. The second block has yellow on the left and black on the right" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 250, + 282, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 250, + 282, + 255 + ], + "spans": [ + { + "bbox": [ + 114, + 250, + 282, + 255 + ], + "type": "text", + "content": "3. The third block has a question mark (this is what we need to figure out)" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 261, + 294, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 261, + 294, + 268 + ], + "spans": [ + { + "bbox": [ + 113, + 261, + 294, + 268 + ], + "type": "text", + "content": "Let me think about the possible relationships between the first and second line:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 274, + 194, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 274, + 194, + 279 + ], + "spans": [ + { + "bbox": [ + 113, + 274, + 194, + 279 + ], + "type": "text", + "content": "Possibility 1: Color transformation" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 281, + 245, + 293 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 114, + 281, + 238, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 281, + 238, + 286 + ], + "spans": [ + { + "bbox": [ + 114, + 281, + 238, + 286 + ], + "type": "text", + "content": "- Blue in the first line becomes black in the second line" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 287, + 245, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 287, + 245, + 293 + ], + "spans": [ + { + "bbox": [ + 114, + 287, + 245, + 293 + ], + "type": "text", + "content": "- White in the first line becomes yellow in the second line" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 298, + 182, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 298, + 182, + 305 + ], + "spans": [ + { + "bbox": [ + 113, + 298, + 182, + 305 + ], + "type": 
"text", + "content": "Possibility 2: Position pattern" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 305, + 305, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 305, + 305, + 312 + ], + "spans": [ + { + "bbox": [ + 114, + 305, + 305, + 312 + ], + "type": "text", + "content": "The first line blocks follow a pattern of right/bottom/left coloring for the blue part." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 312, + 318, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 312, + 318, + 317 + ], + "spans": [ + { + "bbox": [ + 114, + 312, + 318, + 317 + ], + "type": "text", + "content": "The second line blocks might follow the same positional pattern but with different colors." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 317, + 496, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 317, + 496, + 330 + ], + "spans": [ + { + "bbox": [ + 114, + 317, + 496, + 330 + ], + "type": "text", + "content": "Based on the color transformation pattern I identified (blue→black, white→yellow), and looking at the third block in the first line (blue left, white right), the answer should be a block with black on the left and yellow on the right." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 335, + 168, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 335, + 168, + 342 + ], + "spans": [ + { + "bbox": [ + 114, + 335, + 168, + 342 + ], + "type": "text", + "content": "Looking at the options:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 342, + 329, + 366 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 114, + 342, + 299, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 342, + 299, + 349 + ], + "spans": [ + { + "bbox": [ + 114, + 342, + 299, + 349 + ], + "type": "text", + "content": "- Option A: Black on top, yellow on bottom (like the first block in the second row)" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 349, + 286, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 349, + 286, + 354 + ], + "spans": [ + { + "bbox": [ + 114, + 349, + 286, + 354 + ], + "type": "text", + "content": "- Option B: White on left, pink/red on right (doesn't match our color scheme)" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 355, + 329, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 355, + 329, + 361 + ], + "spans": [ + { + "bbox": [ + 114, + 355, + 329, + 361 + ], + "type": "text", + "content": "- Option C: Yellow on top, black on bottom (this would be inverse of first block in second row)" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 361, + 309, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 361, + 309, + 366 + ], + "spans": [ + { + "bbox": [ + 114, + 361, + 309, + 366 + ], + "type": "text", + "content": "- Option D: Blue on bottom, white on top (similar to the second block in the first row)" + } + ] + } + ], + "index": 40 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 372, + 475, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 372, + 475, + 380 + ], + "spans": [ + { + "bbox": [ + 113, + 372, + 475, + 380 + ], + "type": "text", + "content": "None of the options exactly match what I'm looking for. The relationship between the first line and second line might be more complex. 
Let me look at it again:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 384, + 253, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 384, + 253, + 392 + ], + "spans": [ + { + "bbox": [ + 113, + 384, + 253, + 392 + ], + "type": "text", + "content": "First block: white left, blue right " + }, + { + "bbox": [ + 113, + 384, + 253, + 392 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 384, + 253, + 392 + ], + "type": "text", + "content": " black top, yellow bottom" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 392, + 258, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 392, + 258, + 398 + ], + "spans": [ + { + "bbox": [ + 113, + 392, + 258, + 398 + ], + "type": "text", + "content": "Second block: white top, blue bottom " + }, + { + "bbox": [ + 113, + 392, + 258, + 398 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 392, + 258, + 398 + ], + "type": "text", + "content": " yellow left, black right" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 403, + 272, + 428 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 114, + 403, + 184, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 403, + 184, + 409 + ], + "spans": [ + { + "bbox": [ + 114, + 403, + 184, + 409 + ], + "type": "text", + "content": "Let me try to formulate a rule:" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 410, + 181, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 410, + 181, + 415 + ], + "spans": [ + { + "bbox": [ + 114, + 410, + 181, + 415 + ], + "type": "text", + "content": "-White areas become yellow" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 416, + 173, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 416, + 173, + 421 + ], + "spans": [ + { + "bbox": [ + 114, + 416, + 173, + 421 + ], + "type": "text", + "content": "- Blue areas become black" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 422, + 272, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 422, + 272, + 428 + ], + "spans": [ + { + "bbox": [ + 114, + 422, + 272, + 428 + ], + "type": "text", + "content": "- Position changes: left→top, right→bottom, top→left, bottom→right" + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 434, + 460, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 460, + 441 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 460, + 441 + ], + "type": "text", + "content": "So if the third block has blue left and white right, then the third block in the second row should have black top and yellow bottom according to this rule." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 446, + 184, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 446, + 184, + 453 + ], + "spans": [ + { + "bbox": [ + 113, + 446, + 184, + 453 + ], + "type": "text", + "content": "Option A matches this pattern." 
+ } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 459, + 164, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 459, + 164, + 464 + ], + "spans": [ + { + "bbox": [ + 114, + 459, + 164, + 464 + ], + "type": "text", + "content": "Let me double-check:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 465, + 219, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 465, + 219, + 471 + ], + "spans": [ + { + "bbox": [ + 114, + 465, + 219, + 471 + ], + "type": "text", + "content": "First rule pattern: white→yellow, blue→black" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "spans": [ + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "text", + "content": "Position transformation: left " + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "text", + "content": " top, right " + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "text", + "content": " bottom, top " + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "text", + "content": " left, bottom " + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 471, + 285, + 478 + ], + "type": "text", + "content": " right" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 483, + 223, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 483, + 223, + 489 + ], + "spans": [ + { + "bbox": [ + 114, + 483, + 223, + 489 + ], + "type": "text", + "content": "For block 3 in row 1: blue on left, white on right" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 490, + 306, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 490, + 306, + 495 + ], + "spans": [ + { + "bbox": [ + 114, + 490, + 306, + 495 + ], + "type": "text", + "content": "Applying the rules: blue left becomes black top, white right becomes yellow bottom" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 496, + 205, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 496, + 205, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 496, + 205, + 502 + ], + "type": "text", + "content": "This gives us: black top, yellow bottom" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 502, + 317, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 502, + 317, + 508 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 317, + 508 + ], + "type": "text", + "content": "Option A has black on top and yellow on the bottom, so it seems to be the correct answer." 
+ } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 509, + 151, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 509, + 151, + 514 + ], + "spans": [ + { + "bbox": [ + 114, + 509, + 151, + 514 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 515, + 381, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 515, + 381, + 521 + ], + "spans": [ + { + "bbox": [ + 114, + 515, + 381, + 521 + ], + "type": "text", + "content": "To solve this problem, I need to identify the pattern in the first line of blocks and how it transforms in the second line." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 526, + 196, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 526, + 196, + 533 + ], + "spans": [ + { + "bbox": [ + 114, + 526, + 196, + 533 + ], + "type": "text", + "content": "Let me analyze both rows carefully:" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 539, + 137, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 539, + 137, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 539, + 137, + 544 + ], + "type": "text", + "content": "First row:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 545, + 205, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 545, + 205, + 551 + ], + "spans": [ + { + "bbox": [ + 114, + 545, + 205, + 551 + ], + "type": "text", + "content": "- Left block: White on left, blue on right" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 552, + 218, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 552, + 218, + 557 + ], + "spans": [ + { + "bbox": [ + 114, + 552, + 218, + 557 + ], + "type": "text", + "content": "- Middle block: White on top, blue on bottom" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 558, + 208, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 558, + 208, + 563 + ], + "spans": [ + { + "bbox": [ + 114, + 558, + 208, + 563 + ], + "type": "text", + "content": "-Right block:Blue on left, white on right" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 570, + 143, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 570, + 143, + 574 + ], + "spans": [ + { + "bbox": [ + 114, + 570, + 143, + 574 + ], + "type": "text", + "content": "Second row:" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 114, + 576, + 216, + 594 + ], + "type": "list", + "angle": 0, + "index": 70, + "blocks": [ + { + "bbox": [ + 114, + 576, + 215, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 576, + 215, + 582 + ], + "spans": [ + { + "bbox": [ + 114, + 576, + 215, + 582 + ], + "type": "text", + "content": "- Left block: Black on top, yellow on bottom" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 114, + 583, + 216, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 583, + 216, + 589 + ], + "spans": [ + { + "bbox": [ + 114, + 583, + 216, + 589 + ], + "type": "text", + "content": "- Middle block: Yellow on left, black on right" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 114, + 589, + 216, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 589, + 216, + 594 + ], + "spans": [ + { + "bbox": [ + 114, + 589, + 216, + 594 + ], + "type": "text", + "content": "- Right block: ? 
(what we need to determine)" + } + ] + } + ], + "index": 69 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 601, + 241, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 601, + 241, + 606 + ], + "spans": [ + { + "bbox": [ + 114, + 601, + 241, + 606 + ], + "type": "text", + "content": "Looking at the transformation from first to second row:" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 114, + 607, + 287, + 632 + ], + "type": "list", + "angle": 0, + "index": 76, + "blocks": [ + { + "bbox": [ + 114, + 607, + 280, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 607, + 280, + 613 + ], + "spans": [ + { + "bbox": [ + 114, + 607, + 280, + 613 + ], + "type": "text", + "content": "1. The blue areas in the first row seem to become black in the second row" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 114, + 613, + 287, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 613, + 287, + 619 + ], + "spans": [ + { + "bbox": [ + 114, + 613, + 287, + 619 + ], + "type": "text", + "content": "2. The white areas in the first row seem to become yellow in the second row" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 114, + 620, + 211, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 620, + 211, + 625 + ], + "spans": [ + { + "bbox": [ + 114, + 620, + 211, + 625 + ], + "type": "text", + "content": "3. There's also a positional transformation:" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 114, + 626, + 178, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 626, + 178, + 632 + ], + "spans": [ + { + "bbox": [ + 114, + 626, + 178, + 632 + ], + "type": "text", + "content": "- Left side " + }, + { + "bbox": [ + 114, + 626, + 178, + 632 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 626, + 178, + 632 + ], + "type": "text", + "content": " Top position" + } + ] + } + ], + "index": 75 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 118, + 632, + 190, + 649 + ], + "type": "list", + "angle": 0, + "index": 80, + "blocks": [ + { + "bbox": [ + 118, + 632, + 189, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 632, + 189, + 637 + ], + "spans": [ + { + "bbox": [ + 118, + 632, + 189, + 637 + ], + "type": "text", + "content": "-Right side " + }, + { + "bbox": [ + 118, + 632, + 189, + 637 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 118, + 632, + 189, + 637 + ], + "type": "text", + "content": " Bottom position" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 118, + 638, + 176, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 638, + 176, + 643 + ], + "spans": [ + { + "bbox": [ + 118, + 638, + 176, + 643 + ], + "type": "text", + "content": "-Top position " + }, + { + "bbox": [ + 118, + 638, + 176, + 643 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 118, + 638, + 176, + 643 + ], + "type": "text", + "content": " Left side" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 118, + 644, + 190, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 644, + 190, + 649 + ], + "spans": [ + { + "bbox": [ + 118, + 644, + 190, + 649 + ], + "type": "text", + "content": "- Bottom position " + }, + { + "bbox": [ + 118, + 644, + 190, + 649 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 118, 
+ 644, + 190, + 649 + ], + "type": "text", + "content": " Right side" + } + ] + } + ], + "index": 79 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 650, + 318, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 650, + 318, + 656 + ], + "spans": [ + { + "bbox": [ + 114, + 650, + 318, + 656 + ], + "type": "text", + "content": "Option A shows black on top and yellow on bottom, which matches our expected pattern." + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 114, + 662, + 139, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 662, + 139, + 668 + ], + "spans": [ + { + "bbox": [ + 114, + 662, + 139, + 668 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 113, + 673, + 231, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 231, + 682 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 231, + 682 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 113, + 682, + 496, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 682, + 496, + 711 + ], + "spans": [ + { + "bbox": [ + 113, + 682, + 496, + 711 + ], + "type": "text", + "content": "Error Reason: The model gets this question wrong because it focuses too much on individual color mappings or partial position patterns rather than identifying the full transformation. The correct pattern involves a consistent clockwise rotation of color positions from the first to the second row." + } + ] + } + ], + "index": 84 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 86 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 261, + 89, + 345, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 89, + 345, + 102 + ], + "spans": [ + { + "bbox": [ + 261, + 89, + 345, + 102 + ], + "type": "text", + "content": "Analogical (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "61cad466b49690891b856ba14cc70440f36a35be4a0a036b722eeab6b87a80a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "lines": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "spans": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "type": "text", + "content": "Figure 21: A sample correct case of Analogical Reasoning (difficulty: Easy)." 
+ } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 113, + 110, + 268, + 144 + ], + "blocks": [ + { + "bbox": [ + 113, + 110, + 268, + 144 + ], + "lines": [ + { + "bbox": [ + 113, + 110, + 268, + 144 + ], + "spans": [ + { + "bbox": [ + 113, + 110, + 268, + 144 + ], + "type": "image", + "image_path": "10bed72f87c8e193995a37a5c5e14a781a3051a3d0f0558297a496509e6fabfd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 114, + 159, + 249, + 186 + ], + "blocks": [ + { + "bbox": [ + 114, + 159, + 249, + 186 + ], + "lines": [ + { + "bbox": [ + 114, + 159, + 249, + 186 + ], + "spans": [ + { + "bbox": [ + 114, + 159, + 249, + 186 + ], + "type": "image", + "image_path": "d7598ff388d40ce2b7daff4e08614c80aa41b7b49ae51fea453b622c3c9556ad.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 136, + 194, + 179, + 229 + ], + "blocks": [ + { + "bbox": [ + 136, + 194, + 179, + 229 + ], + "lines": [ + { + "bbox": [ + 136, + 194, + 179, + 229 + ], + "spans": [ + { + "bbox": [ + 136, + 194, + 179, + 229 + ], + "type": "image", + "image_path": "a60ad6a9cc833737461a840efa4f6a01f8ba6ccbb0a93d01b817ef50dc45e361.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 202, + 192, + 235, + 225 + ], + "blocks": [ + { + "bbox": [ + 202, + 192, + 235, + 225 + ], + "lines": [ + { + "bbox": [ + 202, + 192, + 235, + 225 + ], + "spans": [ + { + "bbox": [ + 202, + 192, + 235, + 225 + ], + "type": "image", + "image_path": "ecf21b7d9cbd5dd7fe914185b4dd246cda08fcb78323f43b632ec0f01026af01.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 210, + 229, + 214, + 235 + ], + "lines": [ + { + "bbox": [ + 210, + 229, + 214, + 235 + ], + "spans": [ + { + "bbox": [ + 210, + 229, + 214, + 235 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 138, + 246, + 176, + 271 + ], + "blocks": [ + { + "bbox": [ + 138, + 246, + 176, + 271 + ], + "lines": [ + { + "bbox": [ + 138, + 246, + 176, + 271 + ], + "spans": [ + { + "bbox": [ + 138, + 246, + 176, + 271 + ], + "type": "image", + "image_path": "56840636c47c1f5df7225bd5590c34bae5d99f28eed1a5afd5b2806ace902ec9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 273, + 157, + 278 + ], + "lines": [ + { + "bbox": [ + 153, + 273, + 157, + 278 + ], + "spans": [ + { + "bbox": [ + 153, + 273, + 157, + 278 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 202, + 246, + 249, + 272 + ], + "blocks": [ + { + "bbox": [ + 202, + 246, + 249, + 272 + ], + "lines": [ + { + "bbox": [ + 202, + 246, + 249, + 272 + ], + "spans": [ + { + "bbox": [ + 202, + 246, + 249, + 272 + ], + "type": "image", + "image_path": "b93bf93c475a4531b9fbb26b1242800aad6dc72e38106580e5a55971721ed1bd.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 273, + 223, + 278 + ], + "lines": [ + { + "bbox": [ + 219, + 273, + 223, + 278 + ], + "spans": [ + { + "bbox": [ + 219, + 273, + 223, + 278 + ], + "type": "text", + "content": 
"D" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 278, + 169, + 484, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 169, + 484, + 190 + ], + "spans": [ + { + "bbox": [ + 278, + 169, + 484, + 190 + ], + "type": "text", + "content": "Question: Given the analogy in the first line of items, which choice fits best in the black question mark?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 279, + 196, + 348, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 196, + 348, + 207 + ], + "spans": [ + { + "bbox": [ + 279, + 196, + 348, + 207 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 283, + 359, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 283, + 359, + 293 + ], + "spans": [ + { + "bbox": [ + 113, + 283, + 359, + 293 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let's analyze the analogy in the first line:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 299, + 381, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 299, + 381, + 308 + ], + "spans": [ + { + "bbox": [ + 113, + 299, + 381, + 308 + ], + "type": "text", + "content": "The first line shows sliced bread on the left and a knife on the right, connected by an arrow." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 112, + 315, + 497, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 315, + 497, + 331 + ], + "spans": [ + { + "bbox": [ + 112, + 315, + 497, + 331 + ], + "type": "text", + "content": "The relationship here appears to be: the knife is a tool used to cut/slice the bread. The knife is what transforms a whole loaf of bread into slices." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 338, + 212, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 338, + 212, + 347 + ], + "spans": [ + { + "bbox": [ + 113, + 338, + 212, + 347 + ], + "type": "text", + "content": "Now, let's look at the second line:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 347, + 258, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 347, + 258, + 355 + ], + "spans": [ + { + "bbox": [ + 114, + 347, + 258, + 355 + ], + "type": "text", + "content": "- On the left, we see a log (a section of tree trunk)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 355, + 372, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 355, + 372, + 364 + ], + "spans": [ + { + "bbox": [ + 114, + 355, + 372, + 364 + ], + "type": "text", + "content": "- On the right, we need to determine which tool would be used to process or cut this log" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 371, + 193, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 371, + 193, + 379 + ], + "spans": [ + { + "bbox": [ + 114, + 371, + 193, + 379 + ], + "type": "text", + "content": "Let's examine each option:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 379, + 224, + 411 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 114, + 379, + 224, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 379, + 224, + 387 + ], + "spans": [ + { + "bbox": [ + 114, + 379, + 224, + 387 + ], + "type": "text", + "content": "(A) A wooden shed or small building" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 387, + 205, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 387, + 205, + 394 + ], + "spans": [ + { + "bbox": [ + 115, + 387, + 205, + 394 + ], + "type": "text", + "content": "(B) A cleaver or butcher's knife" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 395, + 147, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 395, + 147, + 402 + ], + "spans": [ + { + "bbox": [ + 115, + 395, + 147, + 402 + ], + "type": "text", + "content": "(C) An axe" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 403, + 221, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 403, + 221, + 411 + ], + "spans": [ + { + "bbox": [ + 115, + 403, + 221, + 411 + ], + "type": "text", + "content": "(D) Sliced butter on a wooden board" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 418, + 435, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 418, + 435, + 427 + ], + "spans": [ + { + "bbox": [ + 113, + 418, + 435, + 427 + ], + "type": "text", + "content": "The question is asking which tool would be used to process a log, similar to how a knife is used to slice bread." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 434, + 394, + 467 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 113, + 434, + 394, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 394, + 442 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 394, + 442 + ], + "type": "text", + "content": "- Option A (wooden shed) is not a tool but a structure that might be built using processed wood" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 442, + 355, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 442, + 355, + 450 + ], + "spans": [ + { + "bbox": [ + 114, + 442, + 355, + 450 + ], + "type": "text", + "content": "- Option B (cleaver/butcher's knife) is a cutting tool but not typically used for logs" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 450, + 316, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 450, + 316, + 458 + ], + "spans": [ + { + "bbox": [ + 114, + 450, + 316, + 458 + ], + "type": "text", + "content": "- Option C (axe) is a tool specifically designed for cutting logs/wood" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 458, + 299, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 458, + 299, + 467 + ], + "spans": [ + { + "bbox": [ + 114, + 458, + 299, + 467 + ], + "type": "text", + "content": "- Option D (sliced butter) is not a tool at all, but a food product" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "spans": [ + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "text", + "content": "Following the analogy \"bread " + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "text", + "content": " knife\" (something that needs to be cut " + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "text", + "content": " tool used to cut it), the logical completion for \"log " + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 112, + 473, + 496, + 491 + ], + "type": "text", + "content": " ?\" would be an axe, which is a tool specifically designed to cut wood." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 497, + 254, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 497, + 254, + 506 + ], + "spans": [ + { + "bbox": [ + 113, + 497, + 254, + 506 + ], + "type": "text", + "content": "Therefore, the answer appears to be (C) the axe." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 506, + 162, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 506, + 162, + 514 + ], + "spans": [ + { + "bbox": [ + 114, + 506, + 162, + 514 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 514, + 496, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 496, + 529 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 496, + 529 + ], + "type": "text", + "content": "I need to determine the relationship between the items in the first line and then apply the same relationship to find the missing item in the second line." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 537, + 186, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 537, + 186, + 545 + ], + "spans": [ + { + "bbox": [ + 113, + 537, + 186, + 545 + ], + "type": "text", + "content": "Looking at the first line:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 545, + 323, + 569 + ], + "type": "list", + "angle": 0, + "index": 40, + "blocks": [ + { + "bbox": [ + 114, + 545, + 298, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 545, + 298, + 552 + ], + "spans": [ + { + "bbox": [ + 114, + 545, + 298, + 552 + ], + "type": "text", + "content": "- On the left, there's a loaf of bread with some slices cut from it" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 553, + 199, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 199, + 560 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 199, + 560 + ], + "type": "text", + "content": "- On the right, there's a knife" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 561, + 323, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 561, + 323, + 569 + ], + "spans": [ + { + "bbox": [ + 114, + 561, + 323, + 569 + ], + "type": "text", + "content": "- The relationship is that the knife is the tool used to cut/slice the bread" + } + ] + } + ], + "index": 39 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 576, + 189, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 576, + 189, + 584 + ], + "spans": [ + { + "bbox": [ + 113, + 576, + 189, + 584 + ], + "type": "text", + "content": "Now, for the second line:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 585, + 388, + 601 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 114, + 585, + 257, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 585, + 257, + 593 + ], + "spans": [ + { + "bbox": [ + 114, + 585, + 257, + 593 + ], + "type": "text", + "content": "- On the left, there's a log (a section of tree trunk)" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 593, + 388, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 593, + 388, + 601 + ], + "spans": [ + { + "bbox": [ + 114, + 593, + 388, + 601 + ], + "type": "text", + "content": "- On the right, I need to find the appropriate tool that would be used to cut or process this log" + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 608, + 198, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 608, + 198, + 616 + ], + "spans": [ + { + "bbox": [ + 113, + 608, + 198, + 616 + ], + "type": "text", + "content": "Let me analyze each option:" 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 616, + 475, + 649 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 114, + 616, + 475, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 475, + 624 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 475, + 624 + ], + "type": "text", + "content": "(A) A wooden shed/small building - This is a structure that might be built using processed wood, not a tool for cutting logs" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 115, + 624, + 370, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 624, + 370, + 632 + ], + "spans": [ + { + "bbox": [ + 115, + 624, + 370, + 632 + ], + "type": "text", + "content": "(B) A cleaver/butcher's knife - While this is a cutting tool, it's not typically used for logs" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 115, + 632, + 360, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 632, + 360, + 640 + ], + "spans": [ + { + "bbox": [ + 115, + 632, + 360, + 640 + ], + "type": "text", + "content": "(C) An axe - This is a tool specifically designed for chopping and cutting logs/wood" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 115, + 640, + 353, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 640, + 353, + 649 + ], + "spans": [ + { + "bbox": [ + 115, + 640, + 353, + 649 + ], + "type": "text", + "content": "(D) Sliced butter on a board - This is a food product, not a tool for processing logs" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 655, + 280, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 655, + 280, + 664 + ], + "spans": [ + { + "bbox": [ + 113, + 655, + 280, + 664 + ], + "type": "text", + "content": "The pattern is \"material " + }, + { + "bbox": [ + 113, + 655, + 280, + 664 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 655, + 280, + 664 + ], + "type": "text", + "content": " tool used to cut that material\"" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 664, + 162, + 680 + ], + "type": "list", + "angle": 0, + "index": 54, + "blocks": [ + { + "bbox": [ + 114, + 664, + 162, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 664, + 162, + 671 + ], + "spans": [ + { + "bbox": [ + 114, + 664, + 162, + 671 + ], + "type": "text", + "content": "-Bread " + }, + { + "bbox": [ + 114, + 664, + 162, + 671 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 664, + 162, + 671 + ], + "type": "text", + "content": " Knife" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 671, + 143, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 671, + 143, + 680 + ], + "spans": [ + { + "bbox": [ + 114, + 671, + 143, + 680 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 671, + 143, + 680 + ], + "type": "inline_equation", + "content": "\\mathrm{Log}\\rightarrow ?" + } + ] + } + ], + "index": 53 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 687, + 398, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 687, + 398, + 696 + ], + "spans": [ + { + "bbox": [ + 113, + 687, + 398, + 696 + ], + "type": "text", + "content": "An axe is the tool specifically designed for cutting logs, just as a knife is the tool for slicing bread." 
+ } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 703, + 147, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 703, + 147, + 711 + ], + "spans": [ + { + "bbox": [ + 114, + 703, + 147, + 711 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 56 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 58 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 251, + 88, + 354, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 88, + 354, + 102 + ], + "spans": [ + { + "bbox": [ + 251, + 88, + 354, + 102 + ], + "type": "text", + "content": "Analogical (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 362, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 362, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 362, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 362, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "b175172cabef3457d3fbf51009db02477dbf908a85f4f7569d8011b8942a5875.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 120, + 106, + 148, + 150 + ], + "blocks": [ + { + "bbox": [ + 120, + 106, + 148, + 150 + ], + "lines": [ + { + "bbox": [ + 120, + 106, + 148, + 150 + ], + "spans": [ + { + "bbox": [ + 120, + 106, + 148, + 150 + ], + "type": "image", + "image_path": "b7d7d1a6defad214c382f124a73a41a9d9160dcaaf744d712a10ca291058ec55.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 155, + 110, + 198, + 146 + ], + "blocks": [ + { + "bbox": [ + 155, + 110, + 198, + 146 + ], + "lines": [ + { + "bbox": [ + 155, + 110, + 198, + 146 + ], + "spans": [ + { + "bbox": [ + 155, + 110, + 198, + 146 + ], + "type": "image", + "image_path": "d20318dee2d84b8a10a978ba5d30a8fbe102063bc9651d8ef76d1f77c1d7ef91.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 127, + 205, + 133 + ], + "lines": [ + { + "bbox": [ + 198, + 127, + 205, + 133 + ], + "spans": [ + { + "bbox": [ + 198, + 127, + 205, + 133 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 205, + 110, + 250, + 146 + ], + "blocks": [ + { + "bbox": [ + 205, + 110, + 250, + 146 + ], + "lines": [ + { + "bbox": [ + 205, + 110, + 250, + 146 + ], + "spans": [ + { + "bbox": [ + 205, + 110, + 250, + 146 + ], + "type": "image", + "image_path": "c92374dde78af9c0322744c19117f204b609d167b4de3d59228ed153f09a6a03.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 251, + 107, + 290, + 149 + ], + "blocks": [ + { + "bbox": [ + 251, + 107, + 290, + 149 + ], + "lines": [ + { + "bbox": [ + 251, + 107, + 290, + 
149 + ], + "spans": [ + { + "bbox": [ + 251, + 107, + 290, + 149 + ], + "type": "image", + "image_path": "7ee0bf8793fb3d09c02768e277095ab8284e9064c0e822e409949164f9d52fe8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 123, + 167, + 147, + 203 + ], + "blocks": [ + { + "bbox": [ + 123, + 167, + 147, + 203 + ], + "lines": [ + { + "bbox": [ + 123, + 167, + 147, + 203 + ], + "spans": [ + { + "bbox": [ + 123, + 167, + 147, + 203 + ], + "type": "image", + "image_path": "34dd8ccdd69dd7fd4e38f6df7a7500a4d561d79b031d7cf70a372c6481e767f5.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 134, + 723, + 476, + 736 + ], + "lines": [ + { + "bbox": [ + 134, + 723, + 476, + 736 + ], + "spans": [ + { + "bbox": [ + 134, + 723, + 476, + 736 + ], + "type": "text", + "content": "Figure 22: A sample error case of Analogical Reasoning (difficulty: Medium)." + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 163, + 167, + 196, + 203 + ], + "blocks": [ + { + "bbox": [ + 189, + 159, + 216, + 165 + ], + "lines": [ + { + "bbox": [ + 189, + 159, + 216, + 165 + ], + "spans": [ + { + "bbox": [ + 189, + 159, + 216, + 165 + ], + "type": "text", + "content": "Candidates" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 163, + 167, + 196, + 203 + ], + "lines": [ + { + "bbox": [ + 163, + 167, + 196, + 203 + ], + "spans": [ + { + "bbox": [ + 163, + 167, + 196, + 203 + ], + "type": "image", + "image_path": "784558a7f58d3124ed539611b33a54af63298c5695a8d03677d909c7112a73fd.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 203, + 167, + 247, + 203 + ], + "blocks": [ + { + "bbox": [ + 203, + 167, + 247, + 203 + ], + "lines": [ + { + "bbox": [ + 203, + 167, + 247, + 203 + ], + "spans": [ + { + "bbox": [ + 203, + 167, + 247, + 203 + ], + "type": "image", + "image_path": "5c75fc66ff632914871c1c14e47d85c5dbae9791373244a7835cb9bafd5595e0.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 251, + 167, + 286, + 203 + ], + "blocks": [ + { + "bbox": [ + 251, + 167, + 286, + 203 + ], + "lines": [ + { + "bbox": [ + 251, + 167, + 286, + 203 + ], + "spans": [ + { + "bbox": [ + 251, + 167, + 286, + 203 + ], + "type": "image", + "image_path": "2d8c23c6642e4fdc059fce954cbe74392aa42e50ac339d36ec570b4538d1b9cd.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 300, + 130, + 484, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 130, + 484, + 160 + ], + "spans": [ + { + "bbox": [ + 300, + 130, + 484, + 160 + ], + "type": "text", + "content": "Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 301, + 166, + 370, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 166, + 370, + 177 + ], + "spans": [ + { + "bbox": [ + 301, + 166, + 370, + 177 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 208, + 497, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 208, + 497, + 222 + ], + "spans": [ + { + "bbox": [ + 113, + 208, + 497, + 222 + ], + "type": "text", + "content": "Clae3-7-Sonnt-Thinkin: thinkin> h i th f th t, and then identify which candidate option (A, B, C, or D) best completes the right side of the equation when paired with \"Origini\"." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 227, + 172, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 227, + 172, + 234 + ], + "spans": [ + { + "bbox": [ + 113, + 227, + 172, + 234 + ], + "type": "text", + "content": "Let's analyze the images:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 239, + 136, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 239, + 136, + 245 + ], + "spans": [ + { + "bbox": [ + 113, + 239, + 136, + 245 + ], + "type": "text", + "content": "Left side:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 246, + 321, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 246, + 321, + 252 + ], + "spans": [ + { + "bbox": [ + 114, + 246, + 321, + 252 + ], + "type": "text", + "content": "- Origin: A person installing or working on what appears to be a large appliance or system" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 253, + 358, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 253, + 358, + 259 + ], + "spans": [ + { + "bbox": [ + 114, + 253, + 358, + 259 + ], + "type": "text", + "content": "- Analogy: A person installing or repairing what looks like a garage door opener or ceiling-mounted device" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 264, + 140, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 264, + 140, + 270 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 140, + 270 + ], + "type": "text", + "content": "Right side:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 270, + 386, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 270, + 386, + 277 + ], + "spans": [ + { + "bbox": [ + 114, + 270, + 386, + 277 + ], + "type": "text", + "content": "- Origin: A person's hand operating what appears to be a sound mixing board or control panel with sliders and buttons" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 277, + 280, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 277, + 280, + 283 + ], + "spans": [ + { + "bbox": [ + 114, + 277, + 280, + 283 + ], + "type": "text", + "content": "- Analogy': This is what we need to determine from options A, B, C, or D" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 289, + 248, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 289, + 248, + 295 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 248, + 295 + ], + "type": "text", + "content": "The relationship between Origin and Analogy seems to be:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 295, + 496, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, 
+ 295, + 496, + 308 + ], + "spans": [ + { + "bbox": [ + 114, + 295, + 496, + 308 + ], + "type": "text", + "content": "Both involve installation, repair, or maintenance of mechanical/electrical systems, but in different contexts. They show people actively working on or operating different types of equipment or machinery." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 313, + 422, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 313, + 422, + 320 + ], + "spans": [ + { + "bbox": [ + 113, + 313, + 422, + 320 + ], + "type": "text", + "content": "Now, let's look at the candidate options and see which one best represents the same relationship with Origin' (the sound mixing board):" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 326, + 230, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 326, + 230, + 332 + ], + "spans": [ + { + "bbox": [ + 114, + 326, + 230, + 332 + ], + "type": "text", + "content": "A: A person painting or working on a ceiling/wall" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 332, + 325, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 332, + 325, + 338 + ], + "spans": [ + { + "bbox": [ + 114, + 332, + 325, + 338 + ], + "type": "text", + "content": "B: A close-up of someone working on or repairing what appears to be electronic components" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 339, + 244, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 339, + 244, + 346 + ], + "spans": [ + { + "bbox": [ + 114, + 339, + 244, + 346 + ], + "type": "text", + "content": "C: Some mechanical or electronic equipment with wiring" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 346, + 356, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 346, + 356, + 352 + ], + "spans": [ + { + "bbox": [ + 114, + 346, + 356, + 352 + ], + "type": "text", + "content": "D: People moving or installing what looks like a home appliance (possibly a refrigerator or washer/dryer)" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 357, + 457, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 357, + 457, + 364 + ], + "spans": [ + { + "bbox": [ + 114, + 357, + 457, + 364 + ], + "type": "text", + "content": "I need to determine which option shows a relationship to the sound mixing board (Origin') that parallels the relationship between the first two images." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 369, + 496, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 369, + 496, + 382 + ], + "spans": [ + { + "bbox": [ + 113, + 369, + 496, + 382 + ], + "type": "text", + "content": "If Origin shows someone installing/repairing an appliance, and Analogy shows someone installing/repairing a different type of equipment (garage door opener), then the relationship is about installation/repair of different types of systems." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 387, + 489, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 387, + 489, + 395 + ], + "spans": [ + { + "bbox": [ + 113, + 387, + 489, + 395 + ], + "type": "text", + "content": "Similarly, if Origin' shows someone operating a sound mixing board, then Analogy' should show someone operating a different type of equipment or control system." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 399, + 168, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 399, + 168, + 406 + ], + "spans": [ + { + "bbox": [ + 114, + 399, + 168, + 406 + ], + "type": "text", + "content": "Looking at the options:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 406, + 324, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 406, + 324, + 413 + ], + "spans": [ + { + "bbox": [ + 114, + 406, + 324, + 413 + ], + "type": "text", + "content": "A: Shows installation/repair work (similar to Origin/ Analogy) but not operation of controls" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 414, + 398, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 414, + 398, + 420 + ], + "spans": [ + { + "bbox": [ + 114, + 414, + 398, + 420 + ], + "type": "text", + "content": "B: Shows repair/maintenance of electronic components" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 420, + 246, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 420, + 246, + 426 + ], + "spans": [ + { + "bbox": [ + 114, + 420, + 246, + 426 + ], + "type": "text", + "content": "C: Shows equipment but not clearly someone operating it." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 426, + 227, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 426, + 227, + 431 + ], + "spans": [ + { + "bbox": [ + 114, + 426, + 227, + 431 + ], + "type": "text", + "content": "D: Shows people moving/installing an appliance" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 437, + 495, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 437, + 495, + 443 + ], + "spans": [ + { + "bbox": [ + 114, + 437, + 495, + 443 + ], + "type": "text", + "content": "The key relationship between Origin and Analogy seems to be: a person actively working on installing or maintaining a system. Similarly, Origin' shows a person" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 443, + 229, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 443, + 229, + 450 + ], + "spans": [ + { + "bbox": [ + 114, + 443, + 229, + 450 + ], + "type": "text", + "content": "actively operating a control system (mixing board)" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 455, + 496, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 455, + 496, + 468 + ], + "spans": [ + { + "bbox": [ + 114, + 455, + 496, + 468 + ], + "type": "text", + "content": "Option B seems closest as it shows hands working on electronic components, which could be considered operating or maintaining another type of electronic system. Though it's not entirely clear from the image, this appears to be the closest match to the relationship pattern." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 468, + 151, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 468, + 151, + 474 + ], + "spans": [ + { + "bbox": [ + 114, + 468, + 151, + 474 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 475, + 160, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 475, + 160, + 480 + ], + "spans": [ + { + "bbox": [ + 114, + 475, + 160, + 480 + ], + "type": "text", + "content": "Analogy Analysis" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 486, + 495, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 486, + 495, + 499 + ], + "spans": [ + { + "bbox": [ + 113, + 486, + 495, + 499 + ], + "type": "text", + "content": "I need to understand the relationship between \"Origin\" and \"Analogy\" in the first pair, then apply that same relationship to find what completes the \"Origin\" and \"Analogy\" pair." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 504, + 178, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 504, + 178, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 504, + 178, + 510 + ], + "type": "text", + "content": "Examining the first pair:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 510, + 380, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 510, + 380, + 517 + ], + "spans": [ + { + "bbox": [ + 114, + 510, + 380, + 517 + ], + "type": "text", + "content": "- \\*\\*Origin\\*\\*: Shows a person installing or repairing what appears to be a large appliance (possibly a washer or dryer)" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 517, + 383, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 517, + 383, + 524 + ], + "spans": [ + { + "bbox": [ + 114, + 517, + 383, + 524 + ], + "type": "text", + "content": "- \\*\\*Analogy\\*\\*: Shows a person installing or working on a ceiling-mounted device (appears to be a garage door opener)" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 113, + 529, + 473, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 529, + 473, + 536 + ], + "spans": [ + { + "bbox": [ + 113, + 529, + 473, + 536 + ], + "type": "text", + "content": "The relationship seems to be: Both involve installation/repair/maintenance of household mechanical systems, but in different locations and of different types." 
+ } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 542, + 185, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 542, + 185, + 548 + ], + "spans": [ + { + "bbox": [ + 114, + 542, + 185, + 548 + ], + "type": "text", + "content": "Examining the second pair:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 548, + 358, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 548, + 358, + 554 + ], + "spans": [ + { + "bbox": [ + 114, + 548, + 358, + 554 + ], + "type": "text", + "content": "- \\*\\*Origin\\*\\*: Shows hands operating a sound mixing board or audio control panel with sliders and buttons" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 554, + 228, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 554, + 228, + 560 + ], + "spans": [ + { + "bbox": [ + 114, + 554, + 228, + 560 + ], + "type": "text", + "content": "- \\*\\*Analogy\\*\\*: This is what we need to determine" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 566, + 496, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 566, + 496, + 579 + ], + "spans": [ + { + "bbox": [ + 113, + 566, + 496, + 579 + ], + "type": "text", + "content": "I need to find an option that shows someone working with or operating another type of electronic/technical equipment, maintaining the same relationship as the first pair." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 585, + 175, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 585, + 175, + 590 + ], + "spans": [ + { + "bbox": [ + 114, + 585, + 175, + 590 + ], + "type": "text", + "content": "Analyzing the options:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 591, + 250, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 591, + 250, + 597 + ], + "spans": [ + { + "bbox": [ + 114, + 591, + 250, + 597 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 591, + 250, + 597 + ], + "type": "inline_equation", + "content": "\\mathrm{^{**}A^{**}}" + }, + { + "bbox": [ + 114, + 591, + 250, + 597 + ], + "type": "text", + "content": ": Person painting or installing something on a ceiling" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 597, + 285, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 597, + 285, + 604 + ], + "spans": [ + { + "bbox": [ + 114, + 597, + 285, + 604 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 597, + 285, + 604 + ], + "type": "inline_equation", + "content": "\\mathbf{\\Pi}^{**}\\mathbf{B}^{**}" + }, + { + "bbox": [ + 114, + 597, + 285, + 604 + ], + "type": "text", + "content": " : Close-up of hands working on electronic components/circuit board" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 604, + 260, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 604, + 260, + 609 + ], + "spans": [ + { + "bbox": [ + 114, + 604, + 260, + 609 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 604, + 260, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{\\ddot{C}}^{\\star \\star}" + }, + { + "bbox": [ + 114, + 604, + 260, + 609 + ], + "type": "text", + "content": " : Complex mechanical/electronic equipment with wiring" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 609, + 312, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 609, + 312, 
+ 616 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 312, + 616 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 609, + 312, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{^{**}D^{**}}" + }, + { + "bbox": [ + 114, + 609, + 312, + 616 + ], + "type": "text", + "content": ": Two people moving or installing what appears to be a refrigerator or appliance" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 621, + 428, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 621, + 428, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 621, + 428, + 628 + ], + "type": "text", + "content": "The relationship we're looking for should show hands working on/operating a different type of technical equipment than a mixing board." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 113, + 634, + 496, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 634, + 496, + 647 + ], + "spans": [ + { + "bbox": [ + 113, + 634, + 496, + 647 + ], + "type": "text", + "content": "Option B shows hands working directly with electronic components/circuit board, which parallels how the hands in Origin are working directly with the audio equipment. Both involve hands-on technical work with electronic systems, just different types." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 652, + 139, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 652, + 139, + 658 + ], + "spans": [ + { + "bbox": [ + 114, + 652, + 139, + 658 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 662, + 231, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 662, + 231, + 671 + ], + "spans": [ + { + "bbox": [ + 114, + 662, + 231, + 671 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 113, + 672, + 496, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 672, + 496, + 711 + ], + "spans": [ + { + "bbox": [ + 113, + 672, + 496, + 711 + ], + "type": "text", + "content": "Error Reason: The model gets the answer wrong because it overemphasizes the type of equipment being worked on (mechanical vs. electronic) rather than the specific action and interface involved in the analogy. Option C correctly mirrors the using / fixing relationship seen in the first pair of analogy, while Option B lacks this direct fixing interaction, despite a similar technical context." 
+ } + ] + } + ], + "index": 61 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 63 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 251, + 88, + 354, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 88, + 354, + 102 + ], + "spans": [ + { + "bbox": [ + 251, + 88, + 354, + 102 + ], + "type": "text", + "content": "Analogical (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "257c05296d2ad268a2d7c6e59b7a42d0258eec902ec9535ec6137f13a615e6da.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 111, + 114, + 162, + 156 + ], + "blocks": [ + { + "bbox": [ + 111, + 114, + 162, + 156 + ], + "lines": [ + { + "bbox": [ + 111, + 114, + 162, + 156 + ], + "spans": [ + { + "bbox": [ + 111, + 114, + 162, + 156 + ], + "type": "image", + "image_path": "ab47e0324a8f3696477f82c394854dd355acec65de459490a80c26dc69f8f9ff.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 163, + 113, + 214, + 157 + ], + "blocks": [ + { + "bbox": [ + 163, + 113, + 214, + 157 + ], + "lines": [ + { + "bbox": [ + 163, + 113, + 214, + 157 + ], + "spans": [ + { + "bbox": [ + 163, + 113, + 214, + 157 + ], + "type": "image", + "image_path": "7f678c3abec399cf19f625891724b356cd35e119f8affb3fdaca193802247dc2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 214, + 114, + 278, + 156 + ], + "blocks": [ + { + "bbox": [ + 214, + 114, + 278, + 156 + ], + "lines": [ + { + "bbox": [ + 214, + 114, + 278, + 156 + ], + "spans": [ + { + "bbox": [ + 214, + 114, + 278, + 156 + ], + "type": "image", + "image_path": "85fc0acebdc041898bc46ceeeb24682fb59f7759acc9e281093e3a19ec0fdb83.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 278, + 107, + 331, + 163 + ], + "blocks": [ + { + "bbox": [ + 278, + 107, + 331, + 163 + ], + "lines": [ + { + "bbox": [ + 278, + 107, + 331, + 163 + ], + "spans": [ + { + "bbox": [ + 278, + 107, + 331, + 163 + ], + "type": "image", + "image_path": "ad4b61a98884381daa9f1303c0f51f059c4fb92172fa528912856cb90462826f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 111, + 186, + 165, + 227 + ], + "blocks": [ + { + "bbox": [ + 111, + 186, + 165, + 227 + ], + "lines": [ + { + "bbox": [ + 111, + 186, + 165, + 227 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 165, + 227 + ], + "type": "image", + "image_path": 
"0c9852c365df3b1611c71ccabfed0cc26a574ebf7515ee1488d453c71195d0e9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 175, + 184, + 210, + 230 + ], + "blocks": [ + { + "bbox": [ + 204, + 175, + 236, + 182 + ], + "lines": [ + { + "bbox": [ + 204, + 175, + 236, + 182 + ], + "spans": [ + { + "bbox": [ + 204, + 175, + 236, + 182 + ], + "type": "text", + "content": "Candidates" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 175, + 184, + 210, + 230 + ], + "lines": [ + { + "bbox": [ + 175, + 184, + 210, + 230 + ], + "spans": [ + { + "bbox": [ + 175, + 184, + 210, + 230 + ], + "type": "image", + "image_path": "0ee04f55113fc5aace0ec41a462c21b3615295a732c880c148e7b6f41902a4d8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 220, + 185, + 274, + 228 + ], + "blocks": [ + { + "bbox": [ + 220, + 185, + 274, + 228 + ], + "lines": [ + { + "bbox": [ + 220, + 185, + 274, + 228 + ], + "spans": [ + { + "bbox": [ + 220, + 185, + 274, + 228 + ], + "type": "image", + "image_path": "610adf6221d802cd88a3f2515ae82304ccb0bf5468106f467cd6d6c333f8fa7f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 723, + 480, + 736 + ], + "lines": [ + { + "bbox": [ + 129, + 723, + 480, + 736 + ], + "spans": [ + { + "bbox": [ + 129, + 723, + 480, + 736 + ], + "type": "text", + "content": "Figure 23: A sample correct case of Analogical Reasoning (difficulty: Medium)." + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 276, + 186, + 329, + 228 + ], + "blocks": [ + { + "bbox": [ + 276, + 186, + 329, + 228 + ], + "lines": [ + { + "bbox": [ + 276, + 186, + 329, + 228 + ], + "spans": [ + { + "bbox": [ + 276, + 186, + 329, + 228 + ], + "type": "image", + "image_path": "e8a8f856fd35aa28d63335409c3b7e168800d1baf1d9d81477bd486902989eda.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 335, + 140, + 496, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 140, + 496, + 181 + ], + "spans": [ + { + "bbox": [ + 335, + 140, + 496, + 181 + ], + "type": "text", + "content": "Question: Based on the relationship between Origin and Analogy, select the option from the candidates that best completes this analogy." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 336, + 188, + 405, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 188, + 405, + 198 + ], + "spans": [ + { + "bbox": [ + 336, + 188, + 405, + 198 + ], + "type": "text", + "content": "Ground Truth: (B)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 239, + 495, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 239, + 495, + 259 + ], + "spans": [ + { + "bbox": [ + 113, + 239, + 495, + 259 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the relationship between the \"Origin\" and \"Analogy\" images to determine the pattern, and then apply that to find the best match for \"Origin\" from the candidates." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 266, + 166, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 266, + 166, + 276 + ], + "spans": [ + { + "bbox": [ + 113, + 266, + 166, + 276 + ], + "type": "text", + "content": "In the first pair:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 276, + 446, + 293 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 113, + 276, + 375, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 276, + 375, + 285 + ], + "spans": [ + { + "bbox": [ + 113, + 276, + 375, + 285 + ], + "type": "text", + "content": "- Origin: A dog (looks like a spaniel or similar breed) searching/sniffing in grass" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 285, + 446, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 285, + 446, + 293 + ], + "spans": [ + { + "bbox": [ + 113, + 285, + 446, + 293 + ], + "type": "text", + "content": "- Analogy: What appears to be a small animal (possibly a rodent) in its natural habitat with vegetation" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 301, + 176, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 301, + 176, + 311 + ], + "spans": [ + { + "bbox": [ + 113, + 301, + 176, + 311 + ], + "type": "text", + "content": "In the second pair:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 311, + 395, + 328 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 113, + 311, + 395, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 311, + 395, + 319 + ], + "spans": [ + { + "bbox": [ + 113, + 311, + 395, + 319 + ], + "type": "text", + "content": "- Origin': A dog (appears to be a small white dog) eating or drinking from a bowl/dish" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 319, + 238, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 319, + 238, + 328 + ], + "spans": [ + { + "bbox": [ + 114, + 319, + 238, + 328 + ], + "type": "text", + "content": "- Analogy': We need to determine this" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 336, + 495, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 336, + 495, + 356 + ], + "spans": [ + { + "bbox": [ + 113, + 336, + 495, + 356 + ], + "type": "text", + "content": "The relationship seems to be showing an animal (dog) in the Origin image, then showing what might be a prey animal or similar smaller animal in a related natural context in the Analogy image." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 362, + 476, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 362, + 476, + 373 + ], + "spans": [ + { + "bbox": [ + 113, + 362, + 476, + 373 + ], + "type": "text", + "content": "So, for Origin' which shows a dog eating/ drinking, the Analogy' should show some smaller animal also eating." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 380, + 201, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 380, + 201, + 390 + ], + "spans": [ + { + "bbox": [ + 113, + 380, + 201, + 390 + ], + "type": "text", + "content": "Looking at the candidates:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 390, + 382, + 426 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 113, + 390, + 382, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 390, + 382, + 399 + ], + "spans": [ + { + "bbox": [ + 113, + 390, + 382, + 399 + ], + "type": "text", + "content": "A: Shows a dog with what appears to be a stick or something in its mouth in snow." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 399, + 284, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 399, + 284, + 407 + ], + "spans": [ + { + "bbox": [ + 114, + 399, + 284, + 407 + ], + "type": "text", + "content": "B: Shows a mouse or small rodent eating something" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 408, + 343, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 408, + 343, + 416 + ], + "spans": [ + { + "bbox": [ + 114, + 408, + 343, + 416 + ], + "type": "text", + "content": "C: Shows what looks like a groundhog or marmot near flowers/plants" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 417, + 233, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 417, + 233, + 426 + ], + "spans": [ + { + "bbox": [ + 114, + 417, + 233, + 426 + ], + "type": "text", + "content": "D: Shows another dog's face/profile" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 434, + 496, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 496, + 460 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 496, + 460 + ], + "type": "text", + "content": "Based on the pattern (dog " + }, + { + "bbox": [ + 113, + 434, + 496, + 460 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 434, + 496, + 460 + ], + "type": "text", + "content": " smaller animal in natural context, especially related to the activity the dog is doing), option B (the mouse/rodent eating) would best complete the analogy since it shows a smaller animal doing the same activity (eating) as the dog in Origin!" 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 460, + 167, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 460, + 167, + 469 + ], + "spans": [ + { + "bbox": [ + 113, + 460, + 167, + 469 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 469, + 496, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 469, + 496, + 487 + ], + "spans": [ + { + "bbox": [ + 113, + 469, + 496, + 487 + ], + "type": "text", + "content": "I need to determine the relationship between the \"Origin\" and \"Analogy\" images, and then apply that same relationship to find the appropriate \"Analogy\"\" that matches \"Origin\"\"" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 495, + 194, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 495, + 194, + 505 + ], + "spans": [ + { + "bbox": [ + 113, + 495, + 194, + 505 + ], + "type": "text", + "content": "Looking at the first pair:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 113, + 505, + 394, + 522 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 113, + 505, + 374, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 505, + 374, + 514 + ], + "spans": [ + { + "bbox": [ + 113, + 505, + 374, + 514 + ], + "type": "text", + "content": "- Origin: A dog searching or sniffing in grass (appears to be hunting or tracking)" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 514, + 394, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 394, + 522 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 394, + 522 + ], + "type": "text", + "content": "- Analogy: A small animal (looks like a rodent) in its natural habitat among vegetation" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 530, + 219, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 530, + 219, + 539 + ], + "spans": [ + { + "bbox": [ + 113, + 530, + 219, + 539 + ], + "type": "text", + "content": "Now looking at the second pair:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 540, + 286, + 557 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 114, + 540, + 286, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 540, + 286, + 548 + ], + "spans": [ + { + "bbox": [ + 114, + 540, + 286, + 548 + ], + "type": "text", + "content": "- Origin: A dog eating or drinking from a bowl/dish" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 548, + 258, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 548, + 258, + 557 + ], + "spans": [ + { + "bbox": [ + 114, + 548, + 258, + 557 + ], + "type": "text", + "content": "- Analogy': This is what I need to determine" + } + ] + } + ], + "index": 40 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 565, + 496, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 565, + 496, + 584 + ], + "spans": [ + { + "bbox": [ + 113, + 565, + 496, + 584 + ], + "type": "text", + "content": "The relationship between the first pair seems to be: a dog engaged in a natural behavior (hunting/tracking), followed by what could be its potential prey or a similar smaller animal in its natural environment." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 591, + 496, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 591, + 496, + 611 + ], + "spans": [ + { + "bbox": [ + 113, + 591, + 496, + 611 + ], + "type": "text", + "content": "Following this pattern, the \"Analogy\" should show a smaller animal engaged in eating or drinking (similar to what the dog is doing in \"Origin\")." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 618, + 201, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 618, + 201, + 628 + ], + "spans": [ + { + "bbox": [ + 113, + 618, + 201, + 628 + ], + "type": "text", + "content": "Examining the candidates:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 628, + 356, + 663 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 114, + 628, + 268, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 268, + 636 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 268, + 636 + ], + "type": "text", + "content": "A: A dog with something in its mouth on snow" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 637, + 237, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 637, + 237, + 645 + ], + "spans": [ + { + "bbox": [ + 114, + 637, + 237, + 645 + ], + "type": "text", + "content": "B: A mouse/small rodent eating food" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 646, + 356, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 646, + 356, + 654 + ], + "spans": [ + { + "bbox": [ + 114, + 646, + 356, + 654 + ], + "type": "text", + "content": "C: What appears to be a groundhog or marmot among vegetation/flowers" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 654, + 220, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 654, + 220, + 663 + ], + "spans": [ + { + "bbox": [ + 114, + 654, + 220, + 663 + ], + "type": "text", + "content": "D: A profile view of another dog" + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 671, + 496, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 671, + 496, + 690 + ], + "spans": [ + { + "bbox": [ + 113, + 671, + 496, + 690 + ], + "type": "text", + "content": "Since the pattern is showing a smaller animal doing a similar activity as the dog, option B (the mouse eating) best completes the analogy because it shows a smaller animal engaged in the same activity (eating) as the dog in Origin!" 
+ } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 696, + 149, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 696, + 149, + 706 + ], + "spans": [ + { + "bbox": [ + 113, + 696, + 149, + 706 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 51 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 53 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 259, + 88, + 346, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 88, + 346, + 102 + ], + "spans": [ + { + "bbox": [ + 259, + 88, + 346, + 102 + ], + "type": "text", + "content": "Analogical (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "783d6eab2c018327ef49d4996b35460aeb2b724d7e4d20b7f7afa306007e0475.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 118, + 249, + 179 + ], + "blocks": [ + { + "bbox": [ + 109, + 118, + 249, + 179 + ], + "lines": [ + { + "bbox": [ + 109, + 118, + 249, + 179 + ], + "spans": [ + { + "bbox": [ + 109, + 118, + 249, + 179 + ], + "type": "image", + "image_path": "37189ecbd81689a89efc1f56cd6a8ccc393d42226fa9319b2192c3ead6b3c5f1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 723, + 468, + 736 + ], + "lines": [ + { + "bbox": [ + 140, + 723, + 468, + 736 + ], + "spans": [ + { + "bbox": [ + 140, + 723, + 468, + 736 + ], + "type": "text", + "content": "Figure 24: A sample error case of Analogical Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 252, + 111, + 497, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 111, + 497, + 171 + ], + "spans": [ + { + "bbox": [ + 252, + 111, + 497, + 171 + ], + "type": "text", + "content": "Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 253, + 178, + 323, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 178, + 323, + 189 + ], + "spans": [ + { + "bbox": [ + 253, + 178, + 323, + 189 + ], + "type": "text", + "content": "Ground Truth: (A)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 196, + 496, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 196, + 496, + 210 + ], + "spans": [ + { + "bbox": [ + 113, + 196, + 496, + 210 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze this question carefully. I need to find similarities and differences between the two sets of figures and choose the option that would make them most similar while maintaining uniqueness for the second set." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 215, + 191, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 215, + 191, + 221 + ], + "spans": [ + { + "bbox": [ + 113, + 215, + 191, + 221 + ], + "type": "text", + "content": "The first set of figures consists of:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 222, + 361, + 240 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 114, + 222, + 361, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 222, + 361, + 228 + ], + "spans": [ + { + "bbox": [ + 114, + 222, + 361, + 228 + ], + "type": "text", + "content": "1. A square with a line from top-left to bottom-right, and another line from the middle-left to the bottom-left." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 228, + 181, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 228, + 181, + 234 + ], + "spans": [ + { + "bbox": [ + 114, + 228, + 181, + 234 + ], + "type": "text", + "content": "2. A square with zigzag lines." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 234, + 336, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 234, + 336, + 240 + ], + "spans": [ + { + "bbox": [ + 114, + 234, + 336, + 240 + ], + "type": "text", + "content": "3. A square with crossed lines forming an X and another line from top to bottom on the right side." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 246, + 198, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 246, + 198, + 251 + ], + "spans": [ + { + "bbox": [ + 113, + 246, + 198, + 251 + ], + "type": "text", + "content": "The second set of figures consists of:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 253, + 337, + 271 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 114, + 253, + 337, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 253, + 337, + 258 + ], + "spans": [ + { + "bbox": [ + 114, + 253, + 337, + 258 + ], + "type": "text", + "content": "1. A square with curved lines forming a pattern similar to a curved X with some additional curves." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 259, + 224, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 259, + 224, + 265 + ], + "spans": [ + { + "bbox": [ + 114, + 259, + 224, + 265 + ], + "type": "text", + "content": "2. A square with a circle and curved lines inside." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 265, + 240, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 265, + 240, + 271 + ], + "spans": [ + { + "bbox": [ + 114, + 265, + 240, + 271 + ], + "type": "text", + "content": "3. The question mark - this is what I need to determine." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 277, + 295, + 308 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 113, + 277, + 283, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 277, + 283, + 283 + ], + "spans": [ + { + "bbox": [ + 113, + 277, + 283, + 283 + ], + "type": "text", + "content": "Now let's look at the possible options for the third figure in the second set:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 285, + 258, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 285, + 258, + 289 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 258, + 289 + ], + "type": "text", + "content": "A. A square with curved semi-circles on the left and right sides" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 290, + 233, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 290, + 233, + 295 + ], + "spans": [ + { + "bbox": [ + 114, + 290, + 233, + 295 + ], + "type": "text", + "content": "B. A square with leaf-like shapes in the four corners." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 296, + 295, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 296, + 295, + 302 + ], + "spans": [ + { + "bbox": [ + 114, + 296, + 295, + 302 + ], + "type": "text", + "content": "C. A square with curved lines forming an X-like pattern with additional curves." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 303, + 246, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 303, + 246, + 308 + ], + "spans": [ + { + "bbox": [ + 114, + 303, + 246, + 308 + ], + "type": "text", + "content": "D. A square with star-like pattern created by curved lines." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 313, + 309, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 313, + 309, + 320 + ], + "spans": [ + { + "bbox": [ + 113, + 313, + 309, + 320 + ], + "type": "text", + "content": "To find the pattern, I need to look for similarities between the first set and second set:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 326, + 203, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 326, + 203, + 331 + ], + "spans": [ + { + "bbox": [ + 113, + 326, + 203, + 331 + ], + "type": "text", + "content": "Comparing the first figures of each set:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 333, + 201, + 345 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 114, + 333, + 197, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 333, + 197, + 338 + ], + "spans": [ + { + "bbox": [ + 114, + 333, + 197, + 338 + ], + "type": "text", + "content": "- First set: Square with straight lines" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 339, + 201, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 339, + 201, + 345 + ], + "spans": [ + { + "bbox": [ + 114, + 339, + 201, + 345 + ], + "type": "text", + "content": "- Second set: Square with curved lines" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 350, + 228, + 369 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 113, + 350, + 211, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 350, + 211, + 357 + ], + "spans": [ + { + "bbox": [ + 113, + 350, + 211, + 357 + ], + "type": "text", + "content": "Comparing the second figures of each set:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 358, + 212, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 358, + 212, + 364 + ], + "spans": [ + { + "bbox": [ + 114, + 358, + 212, + 364 + ], + "type": "text", + "content": "- First set: Square with zigzag straight lines" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 364, + 228, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 364, + 228, + 369 + ], + "spans": [ + { + "bbox": [ + 114, + 364, + 228, + 369 + ], + "type": "text", + "content": "- Second set: Square with a circle and curved lines" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 375, + 496, + 407 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 113, + 375, + 496, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 375, + 496, + 388 + ], + "spans": [ + { + "bbox": [ + 113, + 375, + 496, + 388 + ], + "type": "text", + "content": "I notice that the second set seems to be using curved lines where the first set uses straight lines. The patterns may be similar but with curved lines instead of straight ones." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 393, + 496, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 393, + 496, + 407 + ], + "spans": [ + { + "bbox": [ + 113, + 393, + 496, + 407 + ], + "type": "text", + "content": "Looking at the third figure in the first set, it has crossed lines forming an X with an additional line. 
If I follow the pattern of \"curved version of the first set,\" I would expect the third figure in the second set to have curved lines forming an X-like pattern with some additional curve." + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 411, + 411, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 411, + 411, + 419 + ], + "spans": [ + { + "bbox": [ + 113, + 411, + 411, + 419 + ], + "type": "text", + "content": "Looking at the options, option C seems to match this pattern - it has curved lines forming an X-like pattern with additional curves." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 425, + 268, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 425, + 268, + 432 + ], + "spans": [ + { + "bbox": [ + 113, + 425, + 268, + 432 + ], + "type": "text", + "content": "However, I should also check if there's another pattern I'm missing:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 437, + 496, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 437, + 496, + 451 + ], + "spans": [ + { + "bbox": [ + 113, + 437, + 496, + 451 + ], + "type": "text", + "content": "Another possibility is that there's a progression across each row. In the first set, we see increasing complexity or number of lines. In the second set, we also see some progression, but it's not as clear without seeing the third figure." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 455, + 496, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 455, + 496, + 468 + ], + "spans": [ + { + "bbox": [ + 113, + 455, + 496, + 468 + ], + "type": "text", + "content": "Given the information and options available, option C seems to be the most logical choice as it represents a curved version of the X pattern seen in the third figure of the first set." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 468, + 373, + 481 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 114, + 468, + 152, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 468, + 152, + 474 + ], + "spans": [ + { + "bbox": [ + 114, + 468, + 152, + 474 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 474, + 373, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 474, + 373, + 481 + ], + "spans": [ + { + "bbox": [ + 114, + 474, + 373, + 481 + ], + "type": "text", + "content": "I need to analyze both sets of figures to find patterns and determine which option would complete the second set." 
+ } + ] + } + ], + "index": 40 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 486, + 208, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 486, + 208, + 494 + ], + "spans": [ + { + "bbox": [ + 113, + 486, + 208, + 494 + ], + "type": "text", + "content": "First, let me examine both sets of figures:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 498, + 168, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 498, + 168, + 505 + ], + "spans": [ + { + "bbox": [ + 114, + 498, + 168, + 505 + ], + "type": "text", + "content": "**First Set (Top Row):**" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 506, + 280, + 524 + ], + "type": "list", + "angle": 0, + "index": 47, + "blocks": [ + { + "bbox": [ + 114, + 506, + 259, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 506, + 259, + 511 + ], + "spans": [ + { + "bbox": [ + 114, + 506, + 259, + 511 + ], + "type": "text", + "content": "1. Square with straight lines forming an angle in the bottom left" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 512, + 197, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 512, + 197, + 517 + ], + "spans": [ + { + "bbox": [ + 114, + 512, + 197, + 517 + ], + "type": "text", + "content": "2. Square with zigzag diagonal lines" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 517, + 280, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 517, + 280, + 524 + ], + "spans": [ + { + "bbox": [ + 114, + 517, + 280, + 524 + ], + "type": "text", + "content": "3. Square with straight lines forming an X plus a vertical line on the right" + } + ] + } + ], + "index": 46 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 529, + 182, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 529, + 182, + 536 + ], + "spans": [ + { + "bbox": [ + 114, + 529, + 182, + 536 + ], + "type": "text", + "content": "**Second Set (Middle Row):**" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 536, + 263, + 554 + ], + "type": "list", + "angle": 0, + "index": 52, + "blocks": [ + { + "bbox": [ + 114, + 536, + 263, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 536, + 263, + 542 + ], + "spans": [ + { + "bbox": [ + 114, + 536, + 263, + 542 + ], + "type": "text", + "content": "1. Square with curved lines forming what looks like a distorted X" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 542, + 204, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 542, + 204, + 548 + ], + "spans": [ + { + "bbox": [ + 114, + 542, + 204, + 548 + ], + "type": "text", + "content": "2. Square with a circle and curved lines" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 548, + 197, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 548, + 197, + 554 + ], + "spans": [ + { + "bbox": [ + 114, + 548, + 197, + 554 + ], + "type": "text", + "content": "3. 
Question mark - to be determined" + } + ] + } + ], + "index": 51 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 559, + 414, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 559, + 414, + 567 + ], + "spans": [ + { + "bbox": [ + 113, + 559, + 414, + 567 + ], + "type": "text", + "content": "**Key Observation:** The second set appears to use curved lines to create patterns similar to the straight-line patterns in the first set." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 572, + 321, + 591 + ], + "type": "list", + "angle": 0, + "index": 57, + "blocks": [ + { + "bbox": [ + 114, + 572, + 178, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 572, + 178, + 578 + ], + "spans": [ + { + "bbox": [ + 114, + 572, + 178, + 578 + ], + "type": "text", + "content": "Looking at the relationship:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 578, + 294, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 578, + 294, + 585 + ], + "spans": [ + { + "bbox": [ + 114, + 578, + 294, + 585 + ], + "type": "text", + "content": "- First figure in set 1 has straight angles; first figure in set 2 has curved versions" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 586, + 321, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 586, + 321, + 591 + ], + "spans": [ + { + "bbox": [ + 114, + 586, + 321, + 591 + ], + "type": "text", + "content": "- Second figure in set 1 has zigzag lines; second figure in set 2 has curved lines with a circle" + } + ] + } + ], + "index": 56 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 597, + 438, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 597, + 438, + 605 + ], + "spans": [ + { + "bbox": [ + 113, + 597, + 438, + 605 + ], + "type": "text", + "content": "For the third position, I should look for a figure that uses curved lines to represent the X-pattern plus vertical line from the third figure in set 1." 
+ } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 609, + 168, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 609, + 168, + 616 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 168, + 616 + ], + "type": "text", + "content": "Looking at the options:" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 616, + 282, + 641 + ], + "type": "list", + "angle": 0, + "index": 64, + "blocks": [ + { + "bbox": [ + 114, + 616, + 257, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 257, + 622 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 257, + 622 + ], + "type": "text", + "content": "- Option A: Curved lines forming semi-circles on left and right" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 622, + 251, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 622, + 251, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 622, + 251, + 628 + ], + "type": "text", + "content": "- Option B: Curved lines forming leaf-like shapes in corners" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 628, + 282, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 282, + 635 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 282, + 635 + ], + "type": "text", + "content": "- Option C: Curved lines forming an X-like pattern with additional curves" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 635, + 233, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 635, + 233, + 641 + ], + "spans": [ + { + "bbox": [ + 114, + 635, + 233, + 641 + ], + "type": "text", + "content": "- Option D: Curved lines forming a star-like pattern" + } + ] + } + ], + "index": 63 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 646, + 496, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 646, + 496, + 660 + ], + "spans": [ + { + "bbox": [ + 113, + 646, + 496, + 660 + ], + "type": "text", + "content": "Option C seems most appropriate as it shows curved lines forming an X-like pattern, similar to how the third figure in the first set has straight lines forming an X. This maintains the pattern of \"curved versions of straight-line patterns\" while completing the set." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 665, + 140, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 665, + 140, + 671 + ], + "spans": [ + { + "bbox": [ + 114, + 665, + 140, + 671 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 113, + 674, + 232, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 674, + 232, + 685 + ], + "spans": [ + { + "bbox": [ + 113, + 674, + 232, + 685 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 113, + 685, + 496, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 685, + 496, + 712 + ], + "spans": [ + { + "bbox": [ + 113, + 685, + 496, + 712 + ], + "type": "text", + "content": "Error Reason: The model incorrectly chooses (C) because it overly focuses on surface-level visual similarity (the X-like shape) without accounting for the systematic transformation from the first row of blocks to the second row of blocks." 
+ } + ] + } + ], + "index": 68 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 70 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 259, + 89, + 346, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 89, + 346, + 102 + ], + "spans": [ + { + "bbox": [ + 259, + 89, + 346, + 102 + ], + "type": "text", + "content": "Analogical (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "56af6eb71d6e2339d73f592d622135add9ce4337cf2f468cd37ce3f84689bcee.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 723, + 473, + 736 + ], + "lines": [ + { + "bbox": [ + 137, + 723, + 473, + 736 + ], + "spans": [ + { + "bbox": [ + 137, + 723, + 473, + 736 + ], + "type": "text", + "content": "Figure 25: A sample correct case of Analogical Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 77, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 110, + 110, + 247, + 135 + ], + "blocks": [ + { + "bbox": [ + 110, + 110, + 247, + 135 + ], + "lines": [ + { + "bbox": [ + 110, + 110, + 247, + 135 + ], + "spans": [ + { + "bbox": [ + 110, + 110, + 247, + 135 + ], + "type": "image", + "image_path": "f2b1736102b305edc25f255215e05f78a8d01fa59b7ea9760565a410ccf255c6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 136, + 137, + 222, + 162 + ], + "blocks": [ + { + "bbox": [ + 136, + 137, + 222, + 162 + ], + "lines": [ + { + "bbox": [ + 136, + 137, + 222, + 162 + ], + "spans": [ + { + "bbox": [ + 136, + 137, + 222, + 162 + ], + "type": "image", + "image_path": "b8c7c0234db97446c1b2e4d704709d806fe90a0242a41fc9f438630674fb830f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 163, + 214, + 168 + ], + "lines": [ + { + "bbox": [ + 145, + 163, + 214, + 168 + ], + "spans": [ + { + "bbox": [ + 145, + 163, + 214, + 168 + ], + "type": "text", + "content": "A B C D" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 252, + 106, + 497, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 106, + 497, + 166 + ], + "spans": [ + { + "bbox": [ + 252, + 106, + 497, + 166 + ], + "type": "text", + "content": "Question: This question contains two sets of figures and 4 figures to choose from. The two sets of figures have certain similarities as well as differences. You are required to choose one option that is most suitable to replace the question mark. 
The correct answer should not only make the two sets of figures the most similar, but also shows uniqueness for the second set of figures." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 253, + 167, + 321, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 167, + 321, + 178 + ], + "spans": [ + { + "bbox": [ + 253, + 167, + 321, + 178 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 182, + 299, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 182, + 299, + 190 + ], + "spans": [ + { + "bbox": [ + 113, + 182, + 299, + 190 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me carefully analyze this problem." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 195, + 220, + 214 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 113, + 195, + 179, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 195, + 179, + 202 + ], + "spans": [ + { + "bbox": [ + 113, + 195, + 179, + 202 + ], + "type": "text", + "content": "We have two sets of figures:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 202, + 189, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 202, + 189, + 208 + ], + "spans": [ + { + "bbox": [ + 114, + 202, + 189, + 208 + ], + "type": "text", + "content": "- First set: Three figures in a row" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 208, + 220, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 208, + 220, + 214 + ], + "spans": [ + { + "bbox": [ + 114, + 208, + 220, + 214 + ], + "type": "text", + "content": "- Second set: Two figures and a question mark" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 219, + 495, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 219, + 495, + 227 + ], + "spans": [ + { + "bbox": [ + 113, + 219, + 495, + 227 + ], + "type": "text", + "content": "I need to find the best option (A, B, C, or D) to replace the question mark, such that the two sets become most similar while preserving the uniqueness of the second set." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 232, + 189, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 232, + 189, + 239 + ], + "spans": [ + { + "bbox": [ + 113, + 232, + 189, + 239 + ], + "type": "text", + "content": "First, let me analyze the first set:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 239, + 357, + 257 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 114, + 239, + 357, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 239, + 357, + 244 + ], + "spans": [ + { + "bbox": [ + 114, + 239, + 357, + 244 + ], + "type": "text", + "content": "1. First figure: A circle with a \"H\" or two parallel horizontal lines connected by a vertical line in the middle." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 244, + 290, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 244, + 290, + 251 + ], + "spans": [ + { + "bbox": [ + 114, + 244, + 290, + 251 + ], + "type": "text", + "content": "2. Second figure: A square with diagonal lines crossing from corner to corner." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 251, + 184, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 251, + 184, + 257 + ], + "spans": [ + { + "bbox": [ + 114, + 251, + 184, + 257 + ], + "type": "text", + "content": "3. Third figure: A simple circle." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 262, + 258, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 262, + 258, + 269 + ], + "spans": [ + { + "bbox": [ + 113, + 262, + 258, + 269 + ], + "type": "text", + "content": "Now, let me analyze the second set (before the question mark):" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 270, + 369, + 289 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 114, + 270, + 369, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 270, + 369, + 275 + ], + "spans": [ + { + "bbox": [ + 114, + 270, + 369, + 275 + ], + "type": "text", + "content": "1. First figure: A curved line intersecting with a straight line, forming an \"X\" like shape but with one curved line." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 276, + 342, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 276, + 342, + 282 + ], + "spans": [ + { + "bbox": [ + 114, + 276, + 342, + 282 + ], + "type": "text", + "content": "2. Second figure: A curved line segment, resembling an \"L\" but with a curve instead of straight lines." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 282, + 235, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 282, + 235, + 289 + ], + "spans": [ + { + "bbox": [ + 114, + 282, + 235, + 289 + ], + "type": "text", + "content": "3. Question mark: This is what we need to figure out." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 293, + 171, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 293, + 171, + 300 + ], + "spans": [ + { + "bbox": [ + 113, + 293, + 171, + 300 + ], + "type": "text", + "content": "Let's look at the options:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 300, + 299, + 325 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 114, + 300, + 258, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 300, + 258, + 307 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 258, + 307 + ], + "type": "text", + "content": "A: Two crossing lines with a small curve at one end of one line." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 307, + 208, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 307, + 208, + 312 + ], + "spans": [ + { + "bbox": [ + 114, + 307, + 208, + 312 + ], + "type": "text", + "content": "B: Two curved lines crossing in an \"X\" for" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 312, + 299, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 312, + 299, + 319 + ], + "spans": [ + { + "bbox": [ + 114, + 312, + 299, + 319 + ], + "type": "text", + "content": "C: A circle with two perpendicular lines crossing it, forming four equal segments." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 319, + 275, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 319, + 275, + 325 + ], + "spans": [ + { + "bbox": [ + 114, + 319, + 275, + 325 + ], + "type": "text", + "content": "D: A circular pattern with curved lines, creating a swirl or spiral effect." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 331, + 286, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 331, + 286, + 338 + ], + "spans": [ + { + "bbox": [ + 113, + 331, + 286, + 338 + ], + "type": "text", + "content": "Now, let me try to identify the pattern or relationship between the two sets:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 342, + 405, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 342, + 405, + 350 + ], + "spans": [ + { + "bbox": [ + 113, + 342, + 405, + 350 + ], + "type": "text", + "content": "In the first set, we start with a circle containing a shape (H), then move to a square with crossed lines, and finally a simple circle." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 355, + 496, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 355, + 496, + 369 + ], + "spans": [ + { + "bbox": [ + 113, + 355, + 496, + 369 + ], + "type": "text", + "content": "In the second set, we start with crossed lines (one curved), then a curved line segment. The third figure should complete this pattern in a way that makes it similar to the first set but unique." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 373, + 247, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 373, + 247, + 380 + ], + "spans": [ + { + "bbox": [ + 113, + 373, + 247, + 380 + ], + "type": "text", + "content": "Looking at the transformation from first set to second set:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 380, + 335, + 399 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 114, + 380, + 290, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 380, + 290, + 387 + ], + "spans": [ + { + "bbox": [ + 114, + 380, + 290, + 387 + ], + "type": "text", + "content": "1. The first figure in both sets involves lines (H-shape vs X-shape with curve)" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 387, + 335, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 387, + 335, + 393 + ], + "spans": [ + { + "bbox": [ + 114, + 387, + 335, + 393 + ], + "type": "text", + "content": "2. The second figure in both sets involves diagonal/angled elements (crossed square vs curved L)" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 393, + 227, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 393, + 227, + 399 + ], + "spans": [ + { + "bbox": [ + 114, + 393, + 227, + 399 + ], + "type": "text", + "content": "3. The third figure in the first set is a simple circle" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 404, + 283, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 404, + 283, + 412 + ], + "spans": [ + { + "bbox": [ + 113, + 404, + 283, + 412 + ], + "type": "text", + "content": "So what would make the third figure in the second set similar yet unique?" 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 416, + 496, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 416, + 496, + 430 + ], + "spans": [ + { + "bbox": [ + 113, + 416, + 496, + 430 + ], + "type": "text", + "content": "Option C stands out because it's a circle (like the third figure in the first set) but with internal lines dividing it (maintaining the \"line\" theme of the second set). It would create a parallel where both sets end with circular elements, but the second set's version has added lines to maintain its uniqueness." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 113, + 434, + 488, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 488, + 443 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 488, + 443 + ], + "type": "text", + "content": "Let me double-check by considering another angle. Maybe there's a transformation rule from each figure in the first set to the corresponding figure in the second set:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 448, + 429, + 467 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 113, + 448, + 330, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 448, + 330, + 455 + ], + "spans": [ + { + "bbox": [ + 113, + 448, + 330, + 455 + ], + "type": "text", + "content": "1. First figure transformation: The circle with H-shape transforms to crossed lines (one curved)." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 456, + 320, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 456, + 320, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 456, + 320, + 461 + ], + "type": "text", + "content": "2. Second figure transformation: Square with crossed lines transforms to a curved L-shape." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 461, + 429, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 461, + 429, + 467 + ], + "spans": [ + { + "bbox": [ + 114, + 461, + 429, + 467 + ], + "type": "text", + "content": "3. Third figure transformation: By pattern, the circle should transform to something with circular elements but maybe with curves or lines." + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 472, + 409, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 472, + 409, + 479 + ], + "spans": [ + { + "bbox": [ + 113, + 472, + 409, + 479 + ], + "type": "text", + "content": "Looking at options again, C and D both have circular elements. C has straight lines dividing the circle while D has a swirl pattern." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 113, + 484, + 477, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 484, + 477, + 492 + ], + "spans": [ + { + "bbox": [ + 113, + 484, + 477, + 492 + ], + "type": "text", + "content": "Option C seems most consistent with the pattern of the second set (which involves lines) while maintaining similarity to the third figure of the first set (a circle)." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 113, + 497, + 205, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 497, + 205, + 503 + ], + "spans": [ + { + "bbox": [ + 113, + 497, + 205, + 503 + ], + "type": "text", + "content": "So I believe C is the most likely answer." 
+ } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 504, + 152, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 504, + 152, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 504, + 152, + 510 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 510, + 312, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 510, + 312, + 517 + ], + "spans": [ + { + "bbox": [ + 114, + 510, + 312, + 517 + ], + "type": "text", + "content": "I'll solve this step by step by analyzing the two sets of figures and looking for patterns." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 113, + 521, + 166, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 521, + 166, + 528 + ], + "spans": [ + { + "bbox": [ + 113, + 521, + 166, + 528 + ], + "type": "text", + "content": "**First Set of Figures:**" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 528, + 294, + 547 + ], + "type": "list", + "angle": 0, + "index": 54, + "blocks": [ + { + "bbox": [ + 114, + 528, + 294, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 528, + 294, + 534 + ], + "spans": [ + { + "bbox": [ + 114, + 528, + 294, + 534 + ], + "type": "text", + "content": "1. A circle with a \"H\" shape inside (horizontal lines connected by a vertical line)" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 534, + 204, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 534, + 204, + 540 + ], + "spans": [ + { + "bbox": [ + 114, + 534, + 204, + 540 + ], + "type": "text", + "content": "2. A square with diagonal crossed lines" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 540, + 153, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 540, + 153, + 547 + ], + "spans": [ + { + "bbox": [ + 114, + 540, + 153, + 547 + ], + "type": "text", + "content": "3. A simple circle" + } + ] + } + ], + "index": 53 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 553, + 173, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 173, + 559 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 173, + 559 + ], + "type": "text", + "content": "**Second Set of Figures:**" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 559, + 234, + 578 + ], + "type": "list", + "angle": 0, + "index": 59, + "blocks": [ + { + "bbox": [ + 114, + 559, + 234, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 559, + 234, + 565 + ], + "spans": [ + { + "bbox": [ + 114, + 559, + 234, + 565 + ], + "type": "text", + "content": "1. An intersection of a straight line and a curved line" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 565, + 193, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 565, + 193, + 571 + ], + "spans": [ + { + "bbox": [ + 114, + 565, + 193, + 571 + ], + "type": "text", + "content": "2. A curved line in an L-like shape" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 571, + 212, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 571, + 212, + 578 + ], + "spans": [ + { + "bbox": [ + 114, + 571, + 212, + 578 + ], + "type": "text", + "content": "3. 
Question mark - needs to be determined" + } + ] + } + ], + "index": 58 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 583, + 177, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 583, + 177, + 589 + ], + "spans": [ + { + "bbox": [ + 114, + 583, + 177, + 589 + ], + "type": "text", + "content": "**Looking for the Pattern:**" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 589, + 346, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 589, + 346, + 596 + ], + "spans": [ + { + "bbox": [ + 114, + 589, + 346, + 596 + ], + "type": "text", + "content": "I need to understand how these sets relate to each other to find the right option for the question mark." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 113, + 601, + 496, + 712 + ], + "type": "list", + "angle": 0, + "index": 76, + "blocks": [ + { + "bbox": [ + 113, + 601, + 436, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 601, + 436, + 609 + ], + "spans": [ + { + "bbox": [ + 113, + 601, + 436, + 609 + ], + "type": "text", + "content": "The first figures in each set involve lines: one has organized parallel lines in a circle, the other has intersecting lines (one curved, one straight)." + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 609, + 405, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 609, + 405, + 615 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 405, + 615 + ], + "type": "text", + "content": "The second figures in each set involve lines at angles: one has crossed diagonal lines in a square, the other has a curved L-shape." + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 615, + 496, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 615, + 496, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 615, + 496, + 628 + ], + "type": "text", + "content": "For the third figure, the first set has a simple circle. Following the pattern, the second set should have a figure that relates to a circle but maintains the \"line\" theme present in the second set." 
+ } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 632, + 176, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 632, + 176, + 639 + ], + "spans": [ + { + "bbox": [ + 114, + 632, + 176, + 639 + ], + "type": "text", + "content": "**Analyzing the Options:**" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 639, + 205, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 639, + 205, + 645 + ], + "spans": [ + { + "bbox": [ + 114, + 639, + 205, + 645 + ], + "type": "text", + "content": "A: Two crossed lines with a curved end" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 114, + 645, + 215, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 645, + 215, + 651 + ], + "spans": [ + { + "bbox": [ + 114, + 645, + 215, + 651 + ], + "type": "text", + "content": "B: Two curved lines crossing in an X pattern" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 114, + 651, + 257, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 651, + 257, + 658 + ], + "spans": [ + { + "bbox": [ + 114, + 651, + 257, + 658 + ], + "type": "text", + "content": "C: A circle divided into four equal parts by perpendicular lines" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 114, + 658, + 197, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 658, + 197, + 664 + ], + "spans": [ + { + "bbox": [ + 114, + 658, + 197, + 664 + ], + "type": "text", + "content": "D: A swirl pattern with curved lines" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 114, + 670, + 144, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 670, + 144, + 676 + ], + "spans": [ + { + "bbox": [ + 114, + 670, + 144, + 676 + ], + "type": "text", + "content": "**Decision:**" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 114, + 676, + 207, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 676, + 207, + 682 + ], + "spans": [ + { + "bbox": [ + 114, + 676, + 207, + 682 + ], + "type": "text", + "content": "Option C makes the most sense because:" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 114, + 682, + 257, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 682, + 257, + 689 + ], + "spans": [ + { + "bbox": [ + 114, + 682, + 257, + 689 + ], + "type": "text", + "content": "1. It contains a circle (relating to the third figure in the first set)" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 114, + 689, + 279, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 689, + 279, + 695 + ], + "spans": [ + { + "bbox": [ + 114, + 689, + 279, + 695 + ], + "type": "text", + "content": "2. It includes straight lines (maintaining the line theme of the second set)" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 114, + 695, + 416, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 695, + 416, + 701 + ], + "spans": [ + { + "bbox": [ + 114, + 695, + 416, + 701 + ], + "type": "text", + "content": "3. 
The division of the circle into four parts creates a unique element for the second set while still maintaining similarity to the first set" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 114, + 706, + 139, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 706, + 139, + 712 + ], + "spans": [ + { + "bbox": [ + 114, + 706, + 139, + 712 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 75 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 78 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 261, + 89, + 343, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 89, + 343, + 102 + ], + "spans": [ + { + "bbox": [ + 261, + 89, + 343, + 102 + ], + "type": "text", + "content": "Deductive (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "583d34c288f215b97b70b7361ce23897f12f100ad60e17ac83d9a8aebba2de98.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 723, + 466, + 736 + ], + "lines": [ + { + "bbox": [ + 143, + 723, + 466, + 736 + ], + "spans": [ + { + "bbox": [ + 143, + 723, + 466, + 736 + ], + "type": "text", + "content": "Figure 26: A sample error case of Deductive Reasoning (difficulty: Easy)." + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 113, + 106, + 196, + 190 + ], + "blocks": [ + { + "bbox": [ + 113, + 106, + 196, + 190 + ], + "lines": [ + { + "bbox": [ + 113, + 106, + 196, + 190 + ], + "spans": [ + { + "bbox": [ + 113, + 106, + 196, + 190 + ], + "type": "table", + "html": "
<table><tr><td>G</td><td>O</td><td>B</td><td>D</td><td>E</td></tr><tr><td>N</td><td>P</td><td>K</td><td>A</td><td>J</td></tr><tr><td>O</td><td>Z</td><td>G</td><td>F</td><td>Q</td></tr><tr><td>W</td><td>V</td><td>I</td><td>Y</td><td>N</td></tr><tr><td>I</td><td>Q</td><td>T</td><td>H</td></tr></table>
", + "image_path": "14ab2b1c55d80d22512d08289291b338fbb40e27ab710ad313761a8d7b7d6cb2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 204, + 121, + 497, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 121, + 497, + 151 + ], + "spans": [ + { + "bbox": [ + 204, + 121, + 497, + 151 + ], + "type": "text", + "content": "Question: Bruce ciphers words adopting the board demonstrated. For example, the word DVEJ is ciphered as GT WI GH NH. What word did Bruce cipher NQ OH OT WI?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 205, + 152, + 383, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 152, + 383, + 162 + ], + "spans": [ + { + "bbox": [ + 205, + 152, + 383, + 162 + ], + "type": "text", + "content": "Options: A: XOCR. B: DJWS. C: KQFV. D: KFQV" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 205, + 167, + 274, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 167, + 274, + 178 + ], + "spans": [ + { + "bbox": [ + 205, + 167, + 274, + 178 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 196, + 497, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 196, + 497, + 211 + ], + "spans": [ + { + "bbox": [ + 113, + 196, + 497, + 211 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: I see a 5x5 grid of letters that appears to be the board used for Bruce's cipher. Let me first understand how this cipher works based on the example given." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 214, + 281, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 214, + 281, + 223 + ], + "spans": [ + { + "bbox": [ + 113, + 214, + 281, + 223 + ], + "type": "text", + "content": "The example states that the word \"DVEJ\" is ciphered as \"GT WI GH NH\"." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 227, + 177, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 227, + 177, + 234 + ], + "spans": [ + { + "bbox": [ + 113, + 227, + 177, + 234 + ], + "type": "text", + "content": "Let me analyze the pattern:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 234, + 164, + 259 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 114, + 234, + 163, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 234, + 163, + 242 + ], + "spans": [ + { + "bbox": [ + 114, + 234, + 163, + 242 + ], + "type": "text", + "content": "D is ciphered as GT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 243, + 164, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 243, + 164, + 251 + ], + "spans": [ + { + "bbox": [ + 114, + 243, + 164, + 251 + ], + "type": "text", + "content": "V is ciphered as W1 E is ciphered as GH" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 251, + 163, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 251, + 163, + 259 + ], + "spans": [ + { + "bbox": [ + 114, + 251, + 163, + 259 + ], + "type": "text", + "content": "- 1 is ciphered as GH\n- j is ciphered as NH" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 264, + 321, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 264, + 321, + 272 + ], + "spans": [ + { + "bbox": [ + 113, + 264, + 321, + 272 + ], + "type": "text", + "content": "I need to figure out the rule that transforms each letter into two letters based on this board." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 276, + 283, + 334 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 113, + 276, + 283, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 276, + 283, + 284 + ], + "spans": [ + { + "bbox": [ + 113, + 276, + 283, + 284 + ], + "type": "text", + "content": "Looking at the board, I can see that the first column contains G, N, O, W, I." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 285, + 195, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 285, + 195, + 289 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 195, + 289 + ], + "type": "text", + "content": "The first row contains G,O,B,D,E" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 289, + 237, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 289, + 237, + 297 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 237, + 297 + ], + "type": "text", + "content": "The left column and top row are highlighted in green." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 297, + 208, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 297, + 208, + 303 + ], + "spans": [ + { + "bbox": [ + 114, + 297, + 208, + 303 + ], + "type": "text", + "content": "The bottom row is highlighted in yellow." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 308, + 219, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 308, + 219, + 315 + ], + "spans": [ + { + "bbox": [ + 114, + 308, + 219, + 315 + ], + "type": "text", + "content": "Let me try to understand how D becomes GT:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 315, + 241, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 315, + 241, + 321 + ], + "spans": [ + { + "bbox": [ + 114, + 315, + 241, + 321 + ], + "type": "text", + "content": "- Looking at the board, D is in the first row, 4th column." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 321, + 215, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 321, + 215, + 327 + ], + "spans": [ + { + "bbox": [ + 114, + 321, + 215, + 327 + ], + "type": "text", + "content": "-G is the letter in the first row, first column." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 327, + 222, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 327, + 222, + 334 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 222, + 334 + ], + "type": "text", + "content": "- T is not directly visible in the board as shown." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 338, + 356, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 338, + 356, + 346 + ], + "spans": [ + { + "bbox": [ + 113, + 338, + 356, + 346 + ], + "type": "text", + "content": "Let me try a different approach. Maybe the cipher works by identifying the row and column of each letter." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 350, + 151, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 350, + 151, + 357 + ], + "spans": [ + { + "bbox": [ + 113, + 350, + 151, + 357 + ], + "type": "text", + "content": "For the letter D:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 357, + 312, + 376 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 114, + 357, + 192, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 357, + 192, + 364 + ], + "spans": [ + { + "bbox": [ + 114, + 357, + 192, + 364 + ], + "type": "text", + "content": "- D is in the first row, 4th column." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 364, + 209, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 364, + 209, + 370 + ], + "spans": [ + { + "bbox": [ + 114, + 364, + 209, + 370 + ], + "type": "text", + "content": "- The letter at the beginning of row 1 is G." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 370, + 312, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 370, + 312, + 376 + ], + "spans": [ + { + "bbox": [ + 114, + 370, + 312, + 376 + ], + "type": "text", + "content": "- The letter at the beginning of column 4 might be T (though I can't see it in this board)." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 381, + 150, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 381, + 150, + 388 + ], + "spans": [ + { + "bbox": [ + 113, + 381, + 150, + 388 + ], + "type": "text", + "content": "For the letter V:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 388, + 217, + 401 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 114, + 388, + 217, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 388, + 217, + 395 + ], + "spans": [ + { + "bbox": [ + 114, + 388, + 217, + 395 + ], + "type": "text", + "content": "- V appears to be in the 4th row, 2nd column." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 395, + 210, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 395, + 210, + 401 + ], + "spans": [ + { + "bbox": [ + 114, + 395, + 210, + 401 + ], + "type": "text", + "content": "- The letter at the beginning of row 4 is W." + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 401, + 297, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 401, + 297, + 407 + ], + "spans": [ + { + "bbox": [ + 113, + 401, + 297, + 407 + ], + "type": "text", + "content": "- The letter at the beginning of column 2 might be I (though I can't see it clearly)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 113, + 411, + 495, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 411, + 495, + 426 + ], + "spans": [ + { + "bbox": [ + 113, + 411, + 495, + 426 + ], + "type": "text", + "content": "Looking more closely at the board, I notice that there's no column heading visible, but maybe the bottom row (I, Q, T, H) might be the column identifiers. This seems plausible." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 430, + 190, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 430, + 190, + 437 + ], + "spans": [ + { + "bbox": [ + 113, + 430, + 190, + 437 + ], + "type": "text", + "content": "Let me check if this makes sense:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 437, + 291, + 451 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 114, + 437, + 291, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 437, + 291, + 445 + ], + "spans": [ + { + "bbox": [ + 114, + 437, + 291, + 445 + ], + "type": "text", + "content": "- D is in row 1, column " + }, + { + "bbox": [ + 114, + 437, + 291, + 445 + ], + "type": "inline_equation", + "content": "4 \\rightarrow G" + }, + { + "bbox": [ + 114, + 437, + 291, + 445 + ], + "type": "text", + "content": " (row 1 identifier) + T (column 4 identifier) = GT" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 445, + 280, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 445, + 280, + 451 + ], + "spans": [ + { + "bbox": [ + 114, + 445, + 280, + 451 + ], + "type": "text", + "content": "- V is in row 4, column " + }, + { + "bbox": [ + 114, + 445, + 280, + 451 + ], + "type": "inline_equation", + "content": "2 \\rightarrow W" + }, + { + "bbox": [ + 114, + 445, + 280, + 451 + ], + "type": "text", + "content": " (row 4 identifier) + Q (column 2 identifier)" + } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 455, + 277, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 455, + 277, + 463 + ], + "spans": [ + { + "bbox": [ + 113, + 455, + 277, + 463 + ], + "type": "text", + "content": "But this doesn't match the example where V is ciphered as WI, not WQ." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 467, + 310, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 467, + 310, + 475 + ], + "spans": [ + { + "bbox": [ + 113, + 467, + 310, + 475 + ], + "type": "text", + "content": "Maybe the column identifiers are different or the rule is different. Let me re-examine..." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 479, + 477, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 479, + 477, + 488 + ], + "spans": [ + { + "bbox": [ + 113, + 479, + 477, + 488 + ], + "type": "text", + "content": "Actually, looking at the board again, I see that the column headers are likely the first row (G, O, B, D, E) and the row headers are the first column (G, N, O, W, I)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 492, + 123, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 492, + 123, + 498 + ], + "spans": [ + { + "bbox": [ + 113, + 492, + 123, + 498 + ], + "type": "text", + "content": "So:" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 498, + 283, + 512 + ], + "type": "list", + "angle": 0, + "index": 46, + "blocks": [ + { + "bbox": [ + 114, + 498, + 283, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 498, + 283, + 505 + ], + "spans": [ + { + "bbox": [ + 114, + 498, + 283, + 505 + ], + "type": "text", + "content": "- D is at position (1, 4), so its coordinates would be GD (row G, column D)." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 505, + 208, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 505, + 208, + 512 + ], + "spans": [ + { + "bbox": [ + 114, + 505, + 208, + 512 + ], + "type": "text", + "content": "- V isn't clearly visible in the grid shown." + } + ] + } + ], + "index": 45 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 516, + 422, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 516, + 422, + 525 + ], + "spans": [ + { + "bbox": [ + 113, + 516, + 422, + 525 + ], + "type": "text", + "content": "Let me try yet another approach. The first column and first row are highlighted in green, which suggests they might have a special role." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 113, + 529, + 156, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 529, + 156, + 536 + ], + "spans": [ + { + "bbox": [ + 113, + 529, + 156, + 536 + ], + "type": "text", + "content": "Maybe the rule is:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 536, + 399, + 549 + ], + "type": "list", + "angle": 0, + "index": 51, + "blocks": [ + { + "bbox": [ + 114, + 536, + 265, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 536, + 265, + 542 + ], + "spans": [ + { + "bbox": [ + 114, + 536, + 265, + 542 + ], + "type": "text", + "content": "- For each letter in the original word, find its position in the board." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 542, + 399, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 542, + 399, + 549 + ], + "spans": [ + { + "bbox": [ + 114, + 542, + 399, + 549 + ], + "type": "text", + "content": "- The ciphered result is the corresponding letter from the first column followed by the corresponding letter from the first row." + } + ] + } + ], + "index": 50 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 553, + 168, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 553, + 168, + 560 + ], + "spans": [ + { + "bbox": [ + 113, + 553, + 168, + 560 + ], + "type": "text", + "content": "Let me check this for D:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 113, + 560, + 463, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 560, + 463, + 567 + ], + "spans": [ + { + "bbox": [ + 113, + 560, + 463, + 567 + ], + "type": "text", + "content": "- If D is at position (r, c) in the board, the ciphered result would be the letter in the first column at row r, followed by the letter in the first row at column c." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 113, + 571, + 271, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 571, + 271, + 580 + ], + "spans": [ + { + "bbox": [ + 113, + 571, + 271, + 580 + ], + "type": "text", + "content": "But I still don't see how D becomes GT based on the board provided." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 113, + 584, + 304, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 584, + 304, + 591 + ], + "spans": [ + { + "bbox": [ + 113, + 584, + 304, + 591 + ], + "type": "text", + "content": "Wait, maybe I need to interpret the board differently. 
Looking at the board, I notice:" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 591, + 266, + 641 + ], + "type": "list", + "angle": 0, + "index": 64, + "blocks": [ + { + "bbox": [ + 114, + 591, + 193, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 591, + 193, + 597 + ], + "spans": [ + { + "bbox": [ + 114, + 591, + 193, + 597 + ], + "type": "text", + "content": "- G is in the first row, first column." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 597, + 301, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 597, + 301, + 605 + ], + "spans": [ + { + "bbox": [ + 114, + 597, + 301, + 605 + ], + "type": "text", + "content": "- T might be in the bottom row, 3rd column (though it's labeled as T in the image)." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 605, + 193, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 605, + 193, + 611 + ], + "spans": [ + { + "bbox": [ + 114, + 605, + 193, + 611 + ], + "type": "text", + "content": "- W is in the 4th row, first column." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 611, + 266, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 611, + 266, + 616 + ], + "spans": [ + { + "bbox": [ + 114, + 611, + 266, + 616 + ], + "type": "text", + "content": "- I is in the 3rd row, 3rd column (or in the bottom row, 1st column)." + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 616, + 193, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 193, + 623 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 193, + 623 + ], + "type": "text", + "content": "- G is in the first row, first column." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 623, + 198, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 623, + 198, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 623, + 198, + 628 + ], + "type": "text", + "content": "-H is in the bottom row, 4th column." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 628, + 193, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 193, + 635 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 193, + 635 + ], + "type": "text", + "content": "- N is in the 2nd row, first column." + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 635, + 198, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 635, + 198, + 641 + ], + "spans": [ + { + "bbox": [ + 114, + 635, + 198, + 641 + ], + "type": "text", + "content": "- H is in the bottom row, 4th column." + } + ] + } + ], + "index": 63 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 646, + 192, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 646, + 192, + 653 + ], + "spans": [ + { + "bbox": [ + 113, + 646, + 192, + 653 + ], + "type": "text", + "content": "I'm still not seeing a clear pattern." 
+ } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 653, + 153, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 653, + 153, + 660 + ], + "spans": [ + { + "bbox": [ + 114, + 653, + 153, + 660 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 113, + 663, + 234, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 663, + 234, + 673 + ], + "spans": [ + { + "bbox": [ + 113, + 663, + 234, + 673 + ], + "type": "text", + "content": "Error Category: Reject to Answer" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 113, + 673, + 497, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 497, + 712 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 497, + 712 + ], + "type": "text", + "content": "Error Reason: The model reject to answer since it fails to settle on a single consistent ciphering rule or final answer. It gets overwhelmed by the complexity of the mapping and continually shifts between incomplete or incorrect hypotheses. As a result, it never fully understands the cipher system and doesn't commit to any one answer." + } + ] + } + ], + "index": 68 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 70 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 261, + 88, + 343, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 88, + 343, + 102 + ], + "spans": [ + { + "bbox": [ + 261, + 88, + 343, + 102 + ], + "type": "text", + "content": "Deductive (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "16fa6aae6f434bae2f7c980d27911263d2f0d6dcdde43098f76431b41a04006d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 130, + 110, + 312, + 228 + ], + "blocks": [ + { + "bbox": [ + 130, + 110, + 312, + 228 + ], + "lines": [ + { + "bbox": [ + 130, + 110, + 312, + 228 + ], + "spans": [ + { + "bbox": [ + 130, + 110, + 312, + 228 + ], + "type": "image", + "image_path": "13c356b90f683f73e24d1a4dadc822c871d32f6cf7b204e3c8cfa4b0e7433dad.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 723, + 470, + 736 + ], + "lines": [ + { + "bbox": [ + 138, + 723, + 470, + 736 + ], + "spans": [ + { + "bbox": [ + 138, + 723, + 470, + 736 + ], + "type": "text", + "content": "Figure 27: A sample correct case of Deductive Reasoning (difficulty: Easy)." 
+ } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 335, + 124, + 490, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 124, + 490, + 182 + ], + "spans": [ + { + "bbox": [ + 335, + 124, + 490, + 182 + ], + "type": "text", + "content": "Question: From the above food web diagram, what would happen to dragonfly if all mayfly die? Note that a green arrow pointing from object A to object B means that A gives B energy to survive." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 335, + 182, + 487, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 182, + 487, + 201 + ], + "spans": [ + { + "bbox": [ + 335, + 182, + 487, + 201 + ], + "type": "text", + "content": "Options: A: remains the same. B: increase. C: decrease. D: NA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 336, + 209, + 405, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 209, + 405, + 220 + ], + "spans": [ + { + "bbox": [ + 336, + 209, + 405, + 220 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 236, + 495, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 236, + 495, + 258 + ], + "spans": [ + { + "bbox": [ + 112, + 236, + 495, + 258 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the food web diagram to understand the relationships between different organisms." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 266, + 176, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 266, + 176, + 275 + ], + "spans": [ + { + "bbox": [ + 113, + 266, + 176, + 275 + ], + "type": "text", + "content": "In this food web:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 275, + 488, + 296 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 113, + 275, + 488, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 275, + 488, + 285 + ], + "spans": [ + { + "bbox": [ + 113, + 275, + 488, + 285 + ], + "type": "text", + "content": "- Green arrows point from an organism that gives energy to another organism (food source " + }, + { + "bbox": [ + 113, + 275, + 488, + 285 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 275, + 488, + 285 + ], + "type": "text", + "content": " consumer)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 285, + 425, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 285, + 425, + 296 + ], + "spans": [ + { + "bbox": [ + 113, + 285, + 425, + 296 + ], + "type": "text", + "content": "- I need to examine what would happen to dragonfly nymphs if all mayfly nymphs die" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 304, + 202, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 304, + 202, + 315 + ], + "spans": [ + { + "bbox": [ + 113, + 304, + 202, + 315 + ], + "type": "text", + "content": "Looking at the diagram:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 113, + 315, + 462, + 354 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 113, + 315, + 370, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 315, + 370, + 324 + ], + "spans": [ + { + "bbox": [ + 113, + 315, + 370, + 324 + ], + "type": "text", + 
"content": "- Mayfly nymphs feed on pondweed (arrow from pondweed to mayfly)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 324, + 397, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 324, + 397, + 334 + ], + "spans": [ + { + "bbox": [ + 113, + 324, + 397, + 334 + ], + "type": "text", + "content": "- Dragonfly nymphs feed on mayfly nymphs (arrow from mayfly to dragonfly)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 334, + 462, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 334, + 462, + 344 + ], + "spans": [ + { + "bbox": [ + 113, + 334, + 462, + 344 + ], + "type": "text", + "content": "- Dragonfly nymphs also feed on freshwater shrimp (arrow from freshwater shrimp to dragonfly)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 344, + 403, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 344, + 403, + 354 + ], + "spans": [ + { + "bbox": [ + 113, + 344, + 403, + 354 + ], + "type": "text", + "content": "- Brown trout feeds on dragonfly nymphs (arrow from dragonfly to brown trout)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 362, + 205, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 362, + 205, + 373 + ], + "spans": [ + { + "bbox": [ + 113, + 362, + 205, + 373 + ], + "type": "text", + "content": "If all mayfly nymphs die:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 373, + 430, + 403 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 113, + 373, + 383, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 373, + 383, + 383 + ], + "spans": [ + { + "bbox": [ + 113, + 373, + 383, + 383 + ], + "type": "text", + "content": "- Dragonfly nymphs would lose one of their food sources (mayfly nymphs)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 383, + 375, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 383, + 375, + 392 + ], + "spans": [ + { + "bbox": [ + 113, + 383, + 375, + 392 + ], + "type": "text", + "content": "- Dragonfly nymphs would still have freshwater shrimp as a food source" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 392, + 430, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 392, + 430, + 403 + ], + "spans": [ + { + "bbox": [ + 113, + 392, + 430, + 403 + ], + "type": "text", + "content": "- But losing mayfly nymphs would reduce the total available food for dragonfly nymphs" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 411, + 496, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 411, + 496, + 440 + ], + "spans": [ + { + "bbox": [ + 112, + 411, + 496, + 440 + ], + "type": "text", + "content": "Therefore, the population of dragonfly nymphs would likely decrease because one of their food sources is gone, although they would not go extinct since they have another food source. This makes the correct answer (C) decrease." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 441, + 172, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 441, + 172, + 450 + ], + "spans": [ + { + "bbox": [ + 113, + 441, + 172, + 450 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 450, + 321, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 450, + 321, + 460 + ], + "spans": [ + { + "bbox": [ + 113, + 450, + 321, + 460 + ], + "type": "text", + "content": "Analysis of Food Web and Effect on Dragonfly Nymphs" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 112, + 468, + 496, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 468, + 496, + 490 + ], + "spans": [ + { + "bbox": [ + 112, + 468, + 496, + 490 + ], + "type": "text", + "content": "Looking at the food web diagram, I need to determine what would happen to dragonfly nymphs if all mayfly nymphs died out." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 498, + 230, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 498, + 230, + 508 + ], + "spans": [ + { + "bbox": [ + 113, + 498, + 230, + 508 + ], + "type": "text", + "content": "Examining the Relationships" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 112, + 508, + 494, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 508, + 494, + 528 + ], + "spans": [ + { + "bbox": [ + 112, + 508, + 494, + 528 + ], + "type": "text", + "content": "I can see that green arrows point from one organism to another, indicating energy transfer (food source " + }, + { + "bbox": [ + 112, + 508, + 494, + 528 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 112, + 508, + 494, + 528 + ], + "type": "text", + "content": " consumer)." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 536, + 233, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 536, + 233, + 546 + ], + "spans": [ + { + "bbox": [ + 113, + 536, + 233, + 546 + ], + "type": "text", + "content": "From the diagram, I can observe:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 547, + 300, + 586 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 113, + 547, + 246, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 547, + 246, + 556 + ], + "spans": [ + { + "bbox": [ + 113, + 547, + 246, + 556 + ], + "type": "text", + "content": "- Mayfly nymphs feed on pondweed" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 556, + 275, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 556, + 275, + 567 + ], + "spans": [ + { + "bbox": [ + 113, + 556, + 275, + 567 + ], + "type": "text", + "content": "- Dragonfly nymphs feed on mayfly nymphs" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 567, + 300, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 567, + 300, + 576 + ], + "spans": [ + { + "bbox": [ + 113, + 567, + 300, + 576 + ], + "type": "text", + "content": "- Dragonfly nymphs also feed on freshwater shrimp" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 577, + 264, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 577, + 264, + 586 + ], + "spans": [ + { + "bbox": [ + 113, + 577, + 264, + 586 + ], + "type": "text", + "content": "- Brown trout feeds on dragonfly nymphs" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 595, + 185, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 595, + 185, + 605 + ], + "spans": [ + { + "bbox": [ + 113, + 595, + 185, + 605 + ], + "type": "text", + "content": "## Impact Analysis" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 605, + 211, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 605, + 211, + 615 + ], + "spans": [ + { + "bbox": [ + 113, + 605, + 211, + 615 + ], + "type": "text", + "content": "If all mayfly nymphs died:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 615, + 471, + 644 + ], + "type": "list", + "angle": 0, + "index": 40, + "blocks": [ + { + "bbox": [ + 113, + 615, + 353, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 615, + 353, + 624 + ], + "spans": [ + { + "bbox": [ + 113, + 615, + 353, + 624 + ], + "type": "text", + "content": "1. Dragonfly nymphs would lose one of their primary food sources" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 624, + 422, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 624, + 422, + 633 + ], + "spans": [ + { + "bbox": [ + 113, + 624, + 422, + 633 + ], + "type": "text", + "content": "2. Dragonfly nymphs would still have freshwater shrimp as an alternative food source" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 634, + 471, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 634, + 471, + 644 + ], + "spans": [ + { + "bbox": [ + 113, + 634, + 471, + 644 + ], + "type": "text", + "content": "3. 
However, the loss of mayfly nymphs would reduce the total available food for dragonfly nymphs" + } + ] + } + ], + "index": 39 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 653, + 168, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 653, + 168, + 662 + ], + "spans": [ + { + "bbox": [ + 113, + 653, + 168, + 662 + ], + "type": "text", + "content": "Conclusion" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 112, + 662, + 496, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 662, + 496, + 693 + ], + "spans": [ + { + "bbox": [ + 112, + 662, + 496, + 693 + ], + "type": "text", + "content": "Since dragonfly nymphs would lose a significant food source (mayfly nymphs), their population would likely decline, even though they have another food source. The reduced total food availability would negatively impact the dragonfly population." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 701, + 154, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 701, + 154, + 711 + ], + "spans": [ + { + "bbox": [ + 113, + 701, + 154, + 711 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 43 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 45 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 252, + 88, + 353, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 88, + 353, + 102 + ], + "spans": [ + { + "bbox": [ + 252, + 88, + 353, + 102 + ], + "type": "text", + "content": "Deductive (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "01e60a93178e8267e318f99e39c4cca1b82b8f7d2705c3cfe0ae49ccbac99c55.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 110, + 118, + 223, + 166 + ], + "blocks": [ + { + "bbox": [ + 110, + 118, + 223, + 166 + ], + "lines": [ + { + "bbox": [ + 110, + 118, + 223, + 166 + ], + "spans": [ + { + "bbox": [ + 110, + 118, + 223, + 166 + ], + "type": "image", + "image_path": "4798f6c391395f4a6803778aab8ae4f3f1792d475d51602ec2691394a3f0b0da.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 134, + 723, + 474, + 736 + ], + "lines": [ + { + "bbox": [ + 134, + 723, + 474, + 736 + ], + "spans": [ + { + "bbox": [ + 134, + 723, + 474, + 736 + ], + "type": "text", + "content": "Figure 28: A sample error case of Deductive Reasoning (difficulty: Medium)." 
+ } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 226, + 111, + 497, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 111, + 497, + 159 + ], + "spans": [ + { + "bbox": [ + 226, + 111, + 497, + 159 + ], + "type": "text", + "content": "Question: In a tournament each of the 6 teams plays one match against every other team. In each round of matches, 3 take place simultaneously. A TV station has already decided which match it will broadcast for each round, as shown in the diagram. In which round will team D play against team F?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 227, + 160, + 333, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 160, + 333, + 171 + ], + "spans": [ + { + "bbox": [ + 227, + 160, + 333, + 171 + ], + "type": "text", + "content": "Options: A: 1. B: 2. C: 3. D: 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 227, + 176, + 296, + 187 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 176, + 296, + 187 + ], + "spans": [ + { + "bbox": [ + 227, + 176, + 296, + 187 + ], + "type": "text", + "content": "Ground Truth: (A)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 194, + 497, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 194, + 497, + 209 + ], + "spans": [ + { + "bbox": [ + 113, + 194, + 497, + 209 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: This question is asking about a tournament where each team plays one match against every other team. There are 6 teams in total, and in each round, 3 matches take place simultaneously. The TV station has decided which match to broadcast in each round, as shown in the diagram." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 213, + 205, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 213, + 205, + 221 + ], + "spans": [ + { + "bbox": [ + 113, + 213, + 205, + 221 + ], + "type": "text", + "content": "Let's analyze what the diagram tells us:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 226, + 217, + 257 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 113, + 226, + 217, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 226, + 217, + 232 + ], + "spans": [ + { + "bbox": [ + 113, + 226, + 217, + 232 + ], + "type": "text", + "content": "- Round 1: The match being broadcast is A-B" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 232, + 216, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 232, + 216, + 239 + ], + "spans": [ + { + "bbox": [ + 114, + 232, + 216, + 239 + ], + "type": "text", + "content": "- Round 2: The match being broadcast is C-D" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 239, + 216, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 239, + 216, + 244 + ], + "spans": [ + { + "bbox": [ + 114, + 239, + 216, + 244 + ], + "type": "text", + "content": "- Round 3: The match being broadcast is A-E" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 244, + 216, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 244, + 216, + 251 + ], + "spans": [ + { + "bbox": [ + 114, + 244, + 216, + 251 + ], + "type": "text", + "content": "- Round 4: The match being broadcast is E-F" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 251, + 217, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 251, + 217, + 257 + ], + "spans": [ + { + "bbox": [ + 114, + 251, + 217, + 257 + ], + "type": "text", + "content": "- Round 5: The match being broadcast is A-C" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 262, + 276, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 262, + 276, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 262, + 276, + 270 + ], + "type": "text", + "content": "We need to determine in which round team D will play against team F." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "spans": [ + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "type": "text", + "content": "First, let's think about how many rounds there will be in total. With 6 teams, each team needs to play against 5 other teams. So there are a total of " + }, + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "type": "inline_equation", + "content": "(6 \\times 5) / 2 = 15" + }, + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "type": "text", + "content": " matches. In each round, 3 matches take place, so there will be " + }, + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "type": "inline_equation", + "content": "15 / 3 = 5" + }, + { + "bbox": [ + 113, + 274, + 496, + 289 + ], + "type": "text", + "content": " rounds in total." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 293, + 496, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 293, + 496, + 308 + ], + "spans": [ + { + "bbox": [ + 113, + 293, + 496, + 308 + ], + "type": "text", + "content": "The diagram shows the match being broadcast in each round, but it doesn't tell us what the other matches in each round are. To figure this out, I need to work out the full schedule of matches." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 312, + 472, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 312, + 472, + 326 + ], + "spans": [ + { + "bbox": [ + 113, + 312, + 472, + 326 + ], + "type": "text", + "content": "In round 1, A plays against B (as shown in the diagram). So teams C, D, E, and F must form the other two matches in round 1. Let's say these are C-D and E-F. In round 2, C plays against D (as shown in the diagram). So teams A, B, E, and F must form the other two matches in round 2. Let's say these are A-E and B-F." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 330, + 388, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 330, + 388, + 338 + ], + "spans": [ + { + "bbox": [ + 113, + 330, + 388, + 338 + ], + "type": "text", + "content": "But wait, this doesn't work because we've already paired up A-B, C-D, and E-F in round 1. Let's try a different approach." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "spans": [ + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "type": "text", + "content": "Let's try to systematically figure out the schedule. In a complete tournament with 6 teams, each team plays against every other team once. So each team plays 5 matches. There are a total of " + }, + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "type": "inline_equation", + "content": "(6 \\times 5) / 2 = 15" + }, + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "type": "text", + "content": " matches. With 3 matches per round, we need " + }, + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "type": "inline_equation", + "content": "15 / 3 = 5" + }, + { + "bbox": [ + 113, + 342, + 497, + 357 + ], + "type": "text", + "content": " rounds, as I calculated earlier." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 361, + 288, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 361, + 288, + 369 + ], + "spans": [ + { + "bbox": [ + 113, + 361, + 288, + 369 + ], + "type": "text", + "content": "I'll use the constraints from the diagram to help determine the full schedule." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 373, + 405, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 373, + 405, + 381 + ], + "spans": [ + { + "bbox": [ + 113, + 373, + 405, + 381 + ], + "type": "text", + "content": "Round 1: A-B is one of the matches. The other two matches must use teams C, D, E, and F. Let's try to determine what those are." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 386, + 319, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 386, + 319, + 393 + ], + "spans": [ + { + "bbox": [ + 113, + 386, + 319, + 393 + ], + "type": "text", + "content": "Round 2: C-D is one of the matches. The other two matches must use teams A, B, E, and F." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 398, + 319, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 398, + 319, + 405 + ], + "spans": [ + { + "bbox": [ + 113, + 398, + 319, + 405 + ], + "type": "text", + "content": "Round 3: A-E is one of the matches. The other two matches must use teams B, C, D, and F." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 410, + 319, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 410, + 319, + 418 + ], + "spans": [ + { + "bbox": [ + 113, + 410, + 319, + 418 + ], + "type": "text", + "content": "Round 4: E-F is one of the matches. The other two matches must use teams A, B, C, and D." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 422, + 319, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 422, + 319, + 430 + ], + "spans": [ + { + "bbox": [ + 113, + 422, + 319, + 430 + ], + "type": "text", + "content": "Round 5: A-C is one of the matches. The other two matches must use teams B, D, E, and F." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 435, + 247, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 435, + 247, + 443 + ], + "spans": [ + { + "bbox": [ + 113, + 435, + 247, + 443 + ], + "type": "text", + "content": "I'll try filling in a table of which teams play in each round." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 448, + 166, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 448, + 166, + 455 + ], + "spans": [ + { + "bbox": [ + 113, + 448, + 166, + 455 + ], + "type": "text", + "content": "Round 1: A-B, C-? E-?" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 455, + 166, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 455, + 166, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 455, + 166, + 461 + ], + "type": "text", + "content": "Round 2: C-D, A-? , ?-" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 461, + 164, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 461, + 164, + 467 + ], + "spans": [ + { + "bbox": [ + 114, + 461, + 164, + 467 + ], + "type": "text", + "content": "Round 3: A-E, ?-? , ?-?" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 467, + 164, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 467, + 164, + 473 + ], + "spans": [ + { + "bbox": [ + 114, + 467, + 164, + 473 + ], + "type": "text", + "content": "Round 4: E-F, ?-? , ?-?" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 473, + 164, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 473, + 164, + 479 + ], + "spans": [ + { + "bbox": [ + 114, + 473, + 164, + 479 + ], + "type": "text", + "content": "Round 5: A-C, ?-? , ?-?" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 484, + 432, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 484, + 432, + 492 + ], + "spans": [ + { + "bbox": [ + 113, + 484, + 432, + 492 + ], + "type": "text", + "content": "Actually, the question is specifically about when team D plays against team F. So, I really just need to find out which round that happens in." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 497, + 280, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 497, + 280, + 503 + ], + "spans": [ + { + "bbox": [ + 113, + 497, + 280, + 503 + ], + "type": "text", + "content": "From the diagram, we know which matches are broadcast in each round:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 504, + 149, + 532 + ], + "type": "list", + "angle": 0, + "index": 40, + "blocks": [ + { + "bbox": [ + 114, + 504, + 149, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 504, + 149, + 509 + ], + "spans": [ + { + "bbox": [ + 114, + 504, + 149, + 509 + ], + "type": "text", + "content": "- Round 1: A-B" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 509, + 149, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 509, + 149, + 514 + ], + "spans": [ + { + "bbox": [ + 114, + 509, + 149, + 514 + ], + "type": "text", + "content": "- Round 2: C-D" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 514, + 149, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 149, + 520 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 149, + 520 + ], + "type": "text", + "content": "- Round 3: A-F" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 520, + 149, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 520, + 149, + 526 + ], + "spans": [ + { + "bbox": [ + 114, + 520, + 149, + 526 + ], + "type": "text", + "content": "Round 9: A-E Round 4: F-E" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 526, + 149, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 526, + 149, + 532 + ], + "spans": [ + { + "bbox": [ + 114, + 526, + 149, + 532 + ], + "type": "text", + "content": "Round 1: A C" + } + ] + } + ], + "index": 39 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 540, + 187, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 540, + 187, + 546 + ], + "spans": [ + { + "bbox": [ + 113, + 540, + 187, + 546 + ], + "type": "text", + "content": "So the matches involving D are:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 547, + 181, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 547, + 181, + 553 + ], + "spans": [ + { + "bbox": [ + 114, + 547, + 181, + 553 + ], + "type": "text", + "content": "- Round 2: D plays against C." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 553, + 265, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 265, + 559 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 265, + 559 + ], + "type": "text", + "content": "- Other rounds: D plays against A, B, E, and F (one in each round)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 565, + 181, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 565, + 181, + 571 + ], + "spans": [ + { + "bbox": [ + 114, + 565, + 181, + 571 + ], + "type": "text", + "content": "The matches involving F are:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 571, + 265, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 571, + 265, + 585 + ], + "spans": [ + { + "bbox": [ + 114, + 571, + 265, + 585 + ], + "type": "text", + "content": "- Round 4: F plays against E. \n- Other rounds: F plays against A, B, C, and D (one in each round)." 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 113, + 589, + 495, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 589, + 495, + 603 + ], + "spans": [ + { + "bbox": [ + 113, + 589, + 495, + 603 + ], + "type": "text", + "content": "Now, in which round do D and F play against each other? It can't be round 2, because D plays against C in that round. It can't be round 4, because F plays against E in that round. So D and F play against each other in either round 1, 3, or 5." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 113, + 608, + 364, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 608, + 364, + 615 + ], + "spans": [ + { + "bbox": [ + 113, + 608, + 364, + 615 + ], + "type": "text", + "content": "Let's check round 1. We know A plays against B. D and F could potentially play against each other in round 1." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 113, + 615, + 495, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 615, + 495, + 627 + ], + "spans": [ + { + "bbox": [ + 113, + 615, + 495, + 627 + ], + "type": "text", + "content": "Let's check round 3. We know A plays against E. The other teams (B, C, D, and F) form the other two matches. If D plays against F in this round, then B must play against C." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 113, + 627, + 495, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 627, + 495, + 639 + ], + "spans": [ + { + "bbox": [ + 113, + 627, + 495, + 639 + ], + "type": "text", + "content": "Let's check round 5. We know A plays against C. The other teams (B, D, E, and F) form the other two matches. If D plays against F in this round, then B must play against E." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 113, + 644, + 258, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 644, + 258, + 651 + ], + "spans": [ + { + "bbox": [ + 113, + 644, + 258, + 651 + ], + "type": "text", + "content": "Hmm, I'm having trouble determining when D plays against F." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 651, + 152, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 651, + 152, + 658 + ], + "spans": [ + { + "bbox": [ + 114, + 651, + 152, + 658 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 113, + 662, + 234, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 662, + 234, + 672 + ], + "spans": [ + { + "bbox": [ + 113, + 662, + 234, + 672 + ], + "type": "text", + "content": "Error Category: Reject to Answer" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 113, + 672, + 496, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 672, + 496, + 711 + ], + "spans": [ + { + "bbox": [ + 113, + 672, + 496, + 711 + ], + "type": "text", + "content": "Error Reason: The model rejects to answer since it fails to complete the full tournament schedule or isolate the specific round where D plays F. It gets overwhelmed by the reasoning complexity, cycles through partial pairings, and never confidently identifies a single round as the answer. As a result, it ends with indecision and no final response." 
+ } + ] + } + ], + "index": 53 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 55 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 252, + 88, + 353, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 88, + 353, + 102 + ], + "spans": [ + { + "bbox": [ + 252, + 88, + 353, + 102 + ], + "type": "text", + "content": "Deductive (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "c026a961938ed94885c911fa25839173a82101bc93bfae62d8fee252741b6a94.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 723, + 479, + 736 + ], + "lines": [ + { + "bbox": [ + 130, + 723, + 479, + 736 + ], + "spans": [ + { + "bbox": [ + 130, + 723, + 479, + 736 + ], + "type": "text", + "content": "Figure 29: A sample correct case of Deductive Reasoning (difficulty: Medium)." + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 109, + 107, + 194, + 191 + ], + "blocks": [ + { + "bbox": [ + 109, + 107, + 194, + 191 + ], + "lines": [ + { + "bbox": [ + 109, + 107, + 194, + 191 + ], + "spans": [ + { + "bbox": [ + 109, + 107, + 194, + 191 + ], + "type": "table", + "html": "
LWBVMQ
ZPUKGJ
ADHLRZ
GSXEOY
KICAFT
MFICS
", + "image_path": "6e550321c6f928d460bbfa8999034f2fb0a676f1d34b0fb01b756caa063ddc83.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 200, + 121, + 488, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 121, + 488, + 151 + ], + "spans": [ + { + "bbox": [ + 200, + 121, + 488, + 151 + ], + "type": "text", + "content": "Question: Jeff represents words applying the board demonstrated. For example, the word DCXEH is represented as AM KF GF GI AF. What word did Jeff represent KS KF LC LF GM?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 201, + 152, + 402, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 152, + 402, + 162 + ], + "spans": [ + { + "bbox": [ + 201, + 152, + 402, + 162 + ], + "type": "text", + "content": "Options: A: TSMBC. B: TCMBS. C: PEQGT. D: FHMTK" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 201, + 170, + 270, + 181 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 170, + 270, + 181 + ], + "spans": [ + { + "bbox": [ + 201, + 170, + 270, + 181 + ], + "type": "text", + "content": "Ground Truth: (B)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 193, + 497, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 193, + 497, + 208 + ], + "spans": [ + { + "bbox": [ + 113, + 193, + 497, + 208 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: I'm given a grid or board that represents a mapping from letters to two-letter codes. Let's examine how the mapping works by looking at the example." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 212, + 238, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 212, + 238, + 220 + ], + "spans": [ + { + "bbox": [ + 113, + 212, + 238, + 220 + ], + "type": "text", + "content": "The word DCXEH is represented as AM KF GF GI AF." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 225, + 183, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 225, + 183, + 232 + ], + "spans": [ + { + "bbox": [ + 113, + 225, + 183, + 232 + ], + "type": "text", + "content": "Let me determine the pattern:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 232, + 184, + 262 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 114, + 232, + 172, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 232, + 172, + 236 + ], + "spans": [ + { + "bbox": [ + 114, + 232, + 172, + 236 + ], + "type": "text", + "content": "- D is represented as AM" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 236, + 169, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 236, + 169, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 236, + 169, + 243 + ], + "type": "text", + "content": "- C is represented as KF" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 243, + 184, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 243, + 184, + 249 + ], + "spans": [ + { + "bbox": [ + 114, + 243, + 184, + 249 + ], + "type": "text", + "content": "- X is represented as GF" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 249, + 184, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 249, + 184, + 255 + ], + "spans": [ + { + "bbox": [ + 114, + 249, + 184, + 255 + ], + "type": "text", + "content": "E is represented as GI" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 255, + 170, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 255, + 170, + 262 + ], + "spans": [ + { + "bbox": [ + 114, + 255, + 170, + 262 + ], + "type": "text", + "content": "- H is represented as AF" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 267, + 448, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 267, + 448, + 275 + ], + "spans": [ + { + "bbox": [ + 113, + 267, + 448, + 275 + ], + "type": "text", + "content": "Looking at the image, I see a grid with rows labeled with L, Z, A, G, K and columns labeled with W, B, V, M, Q, and a bottom row with M, F, I, C, S." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 280, + 423, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 280, + 423, + 288 + ], + "spans": [ + { + "bbox": [ + 113, + 280, + 423, + 288 + ], + "type": "text", + "content": "To understand how this board works for encoding, I need to figure out how each letter in the original word maps to the two-letter code." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 293, + 395, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 293, + 395, + 300 + ], + "spans": [ + { + "bbox": [ + 113, + 293, + 395, + 300 + ], + "type": "text", + "content": "For D, it maps to AM. D appears in the first column of the row labeled A. So A (row) + M (column bottom row) = AM for D." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 300, + 392, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 300, + 392, + 306 + ], + "spans": [ + { + "bbox": [ + 113, + 300, + 392, + 306 + ], + "type": "text", + "content": "For C, it maps to KF. C appears in the third column of the row labeled K. So K (row) + F (column bottom row) = KF for C." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 306, + 391, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 306, + 391, + 312 + ], + "spans": [ + { + "bbox": [ + 114, + 306, + 391, + 312 + ], + "type": "text", + "content": "For X, it maps to GF. X appears in the third column of the row labeled G. So G (row) + F (column bottom row) = GF for X." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 312, + 391, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 312, + 391, + 318 + ], + "spans": [ + { + "bbox": [ + 114, + 312, + 391, + 318 + ], + "type": "text", + "content": "For E, it maps to GI. E appears in the fourth column of the row labeled G. So G (row) + I (column bottom row) = GI for E." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 318, + 392, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 318, + 392, + 324 + ], + "spans": [ + { + "bbox": [ + 114, + 318, + 392, + 324 + ], + "type": "text", + "content": "For H, it maps to AF. H appears in the third column of the row labeled A. So A (row) + F (column bottom row) = AF for H." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 330, + 175, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 330, + 175, + 336 + ], + "spans": [ + { + "bbox": [ + 113, + 330, + 175, + 336 + ], + "type": "text", + "content": "So the pattern seems to be:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 336, + 182, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 336, + 182, + 342 + ], + "spans": [ + { + "bbox": [ + 114, + 336, + 182, + 342 + ], + "type": "text", + "content": "1. Locate the letter in the grid" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 342, + 215, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 342, + 215, + 347 + ], + "spans": [ + { + "bbox": [ + 114, + 342, + 215, + 347 + ], + "type": "text", + "content": "2. The first letter of the code is the row label" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 348, + 353, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 348, + 353, + 354 + ], + "spans": [ + { + "bbox": [ + 114, + 348, + 353, + 354 + ], + "type": "text", + "content": "3. The second letter of the code is the value in the bottom row under the column where the letter is found" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 361, + 197, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 361, + 197, + 367 + ], + "spans": [ + { + "bbox": [ + 114, + 361, + 197, + 367 + ], + "type": "text", + "content": "Now let's decode KS KF LC LF GM:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 373, + 123, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 373, + 123, + 379 + ], + "spans": [ + { + "bbox": [ + 114, + 373, + 123, + 379 + ], + "type": "text", + "content": "KS:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 379, + 305, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 379, + 305, + 385 + ], + "spans": [ + { + "bbox": [ + 114, + 379, + 305, + 385 + ], + "type": "text", + "content": "Row K, column with bottom row value S. This refers to column 5, which contains T." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 392, + 123, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 392, + 123, + 397 + ], + "spans": [ + { + "bbox": [ + 114, + 392, + 123, + 397 + ], + "type": "text", + "content": "KF:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 397, + 306, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 397, + 306, + 404 + ], + "spans": [ + { + "bbox": [ + 114, + 397, + 306, + 404 + ], + "type": "text", + "content": "Row K, column with bottom row value F. This refers to column 2, which contains C." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 410, + 123, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 410, + 123, + 415 + ], + "spans": [ + { + "bbox": [ + 114, + 410, + 123, + 415 + ], + "type": "text", + "content": "LC:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 415, + 307, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 415, + 307, + 422 + ], + "spans": [ + { + "bbox": [ + 114, + 415, + 307, + 422 + ], + "type": "text", + "content": "Row L, column with bottom row value C. This refers to column 4, which contains M." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 429, + 122, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 429, + 122, + 434 + ], + "spans": [ + { + "bbox": [ + 114, + 429, + 122, + 434 + ], + "type": "text", + "content": "LF:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 434, + 304, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 434, + 304, + 440 + ], + "spans": [ + { + "bbox": [ + 114, + 434, + 304, + 440 + ], + "type": "text", + "content": "Row L, column with bottom row value F. This refers to column 2, which contains B." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 446, + 123, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 446, + 123, + 451 + ], + "spans": [ + { + "bbox": [ + 114, + 446, + 123, + 451 + ], + "type": "text", + "content": "GM" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 453, + 307, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 453, + 307, + 460 + ], + "spans": [ + { + "bbox": [ + 114, + 453, + 307, + 460 + ], + "type": "text", + "content": "Row G, column with bottom row value M. This refers to column 1, which contains S." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 465, + 188, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 465, + 188, + 471 + ], + "spans": [ + { + "bbox": [ + 114, + 465, + 188, + 471 + ], + "type": "text", + "content": "So the decoded word is TCMBS." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 472, + 152, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 472, + 152, + 478 + ], + "spans": [ + { + "bbox": [ + 114, + 472, + 152, + 478 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 478, + 200, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 478, + 200, + 484 + ], + "spans": [ + { + "bbox": [ + 114, + 478, + 200, + 484 + ], + "type": "text", + "content": "# Decoding the Word Representation" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 490, + 364, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 490, + 364, + 497 + ], + "spans": [ + { + "bbox": [ + 113, + 490, + 364, + 497 + ], + "type": "text", + "content": "I need to figure out how Jeff's encoding system works based on the example, then decode the given sequence." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 502, + 206, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 502, + 206, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 206, + 510 + ], + "type": "text", + "content": "## Understanding the Encoding System" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 515, + 151, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 515, + 151, + 521 + ], + "spans": [ + { + "bbox": [ + 114, + 515, + 151, + 521 + ], + "type": "text", + "content": "In the example:" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 521, + 217, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 521, + 217, + 527 + ], + "spans": [ + { + "bbox": [ + 114, + 521, + 217, + 527 + ], + "type": "text", + "content": "- DCXEH is represented as AM KF GFI AF" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 533, + 294, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 533, + 294, + 540 + ], + "spans": [ + { + "bbox": [ + 114, + 533, + 294, + 540 + ], + "type": "text", + "content": "Looking at the grid provided in the image, I can see this is a coordinate system:" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 540, + 222, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 540, + 222, + 546 + ], + "spans": [ + { + "bbox": [ + 114, + 540, + 222, + 546 + ], + "type": "text", + "content": "- The rows are labeled with letters L, Z, A, G, K" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 546, + 247, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 546, + 247, + 552 + ], + "spans": [ + { + "bbox": [ + 114, + 546, + 247, + 552 + ], + "type": "text", + "content": "- The columns have a bottom row with values M, F, I, C, S" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 558, + 200, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 558, + 200, + 563 + ], + "spans": [ + { + "bbox": [ + 114, + 558, + 200, + 563 + ], + "type": "text", + "content": "The encoding seems to work like this:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 563, + 263, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 563, + 263, + 571 + ], + "spans": [ + { + "bbox": [ + 114, + 563, + 263, + 571 + ], + "type": "text", + "content": "1. 
For each letter in the original word, find its position in the grid" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 571, + 212, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 571, + 212, + 576 + ], + "spans": [ + { + "bbox": [ + 114, + 571, + 212, + 576 + ], + "type": "text", + "content": "2. The first part of the code is the row label" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 576, + 266, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 576, + 266, + 582 + ], + "spans": [ + { + "bbox": [ + 114, + 576, + 266, + 582 + ], + "type": "text", + "content": "3. The second part is the value from the bottom row of that column" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 589, + 187, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 589, + 187, + 595 + ], + "spans": [ + { + "bbox": [ + 114, + 589, + 187, + 595 + ], + "type": "text", + "content": "Let me verify with the example:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 595, + 226, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 595, + 226, + 601 + ], + "spans": [ + { + "bbox": [ + 114, + 595, + 226, + 601 + ], + "type": "text", + "content": "- D is in row A, column with M at bottom " + }, + { + "bbox": [ + 114, + 595, + 226, + 601 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 595, + 226, + 601 + ], + "type": "text", + "content": " AM" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 601, + 220, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 601, + 220, + 607 + ], + "spans": [ + { + "bbox": [ + 114, + 601, + 220, + 607 + ], + "type": "text", + "content": "- C is in row K, column with F at bottom " + }, + { + "bbox": [ + 114, + 601, + 220, + 607 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 601, + 220, + 607 + ], + "type": "text", + "content": " KF" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 607, + 220, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 607, + 220, + 613 + ], + "spans": [ + { + "bbox": [ + 114, + 607, + 220, + 613 + ], + "type": "text", + "content": "- X is in row G, column with F at bottom " + }, + { + "bbox": [ + 114, + 607, + 220, + 613 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 607, + 220, + 613 + ], + "type": "text", + "content": " GP" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 613, + 220, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 613, + 220, + 619 + ], + "spans": [ + { + "bbox": [ + 114, + 613, + 220, + 619 + ], + "type": "text", + "content": "- E is in row G, column with I at bottom " + }, + { + "bbox": [ + 114, + 613, + 220, + 619 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 613, + 220, + 619 + ], + "type": "text", + "content": " GI" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "spans": [ + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "type": "text", + "content": "- H is in row " + }, + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "type": "inline_equation", + "content": "A_{t}" + }, + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "type": "text", + "content": " 
column with F at bottom " + }, + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "type": "inline_equation", + "content": "\\rightarrow \\mathrm{AF}" + }, + { + "bbox": [ + 114, + 619, + 221, + 625 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 632, + 191, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 632, + 191, + 639 + ], + "spans": [ + { + "bbox": [ + 114, + 632, + 191, + 639 + ], + "type": "text", + "content": "## Decoding the Given Sequence" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 644, + 196, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 644, + 196, + 651 + ], + "spans": [ + { + "bbox": [ + 114, + 644, + 196, + 651 + ], + "type": "text", + "content": "Now let's decode KS KF LC LF GM:" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 656, + 241, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 656, + 241, + 662 + ], + "spans": [ + { + "bbox": [ + 114, + 656, + 241, + 662 + ], + "type": "text", + "content": "- KS: Row K, column with S at bottom " + }, + { + "bbox": [ + 114, + 656, + 241, + 662 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 656, + 241, + 662 + ], + "type": "text", + "content": " The letter is T" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 662, + 241, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 662, + 241, + 669 + ], + "spans": [ + { + "bbox": [ + 114, + 662, + 241, + 669 + ], + "type": "text", + "content": "- KF: Row K, column with F at bottom " + }, + { + "bbox": [ + 114, + 662, + 241, + 669 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 662, + 241, + 669 + ], + "type": "text", + "content": " The letter is C" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 669, + 242, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 669, + 242, + 675 + ], + "spans": [ + { + "bbox": [ + 114, + 669, + 242, + 675 + ], + "type": "text", + "content": "- LC: Row L, column with C at bottom " + }, + { + "bbox": [ + 114, + 669, + 242, + 675 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 669, + 242, + 675 + ], + "type": "text", + "content": " The letter is M" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 675, + 239, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 675, + 239, + 681 + ], + "spans": [ + { + "bbox": [ + 114, + 675, + 239, + 681 + ], + "type": "text", + "content": "- LF: Row " + }, + { + "bbox": [ + 114, + 675, + 239, + 681 + ], + "type": "inline_equation", + "content": "\\mathrm{L}_r" + }, + { + "bbox": [ + 114, + 675, + 239, + 681 + ], + "type": "text", + "content": ", column with F at bottom → The letter is B" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 681, + 244, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 681, + 244, + 687 + ], + "spans": [ + { + "bbox": [ + 114, + 681, + 244, + 687 + ], + "type": "text", + "content": "- GM: Row G, column with M at bottom " + }, + { + "bbox": [ + 114, + 681, + 244, + 687 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 114, + 681, + 244, + 687 + ], + "type": "text", + "content": " The letter is S" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 693, + 225, + 700 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 693, + 225, + 700 + ], + "spans": [ + { + "bbox": [ + 114, + 693, + 225, + 700 + ], + "type": "text", + "content": "This gives us TCMBS, which matches option (B)." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 705, + 139, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 705, + 139, + 712 + ], + "spans": [ + { + "bbox": [ + 114, + 705, + 139, + 712 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 66 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 68 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 260, + 89, + 345, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 89, + 345, + 102 + ], + "spans": [ + { + "bbox": [ + 260, + 89, + 345, + 102 + ], + "type": "text", + "content": "Deductive (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "ec18a4ac4c2468c7500f4f846ec923d07b52c062bec7b6f9ea05e65fb8aa82cd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 130, + 106, + 267, + 205 + ], + "blocks": [ + { + "bbox": [ + 130, + 106, + 267, + 205 + ], + "lines": [ + { + "bbox": [ + 130, + 106, + 267, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 106, + 267, + 205 + ], + "type": "image", + "image_path": "7e313e170b470582b9ae9c2a7973d28c3e99cdd5aa3de4c0ceb96af05e72e7c3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 723, + 467, + 736 + ], + "lines": [ + { + "bbox": [ + 141, + 723, + 467, + 736 + ], + "spans": [ + { + "bbox": [ + 141, + 723, + 467, + 736 + ], + "type": "text", + "content": "Figure 30: A sample error case of Deductive Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 286, + 111, + 485, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 111, + 485, + 170 + ], + "spans": [ + { + "bbox": [ + 286, + 111, + 485, + 170 + ], + "type": "text", + "content": "Question: Jan cannot draw very accurately but nevertheless he tried to produce a roadmap of his village. The relative position of the houses and the street crossings are all correct but three of the roads are actually straight and only Qurwik street is not. Who lives in Qurwik street?" 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 287, + 170, + 448, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 287, + 170, + 448, + 180 + ], + "spans": [ + { + "bbox": [ + 287, + 170, + 448, + 180 + ], + "type": "text", + "content": "Options: A: Amy. B: Ben. C: Carol. D: David" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 287, + 190, + 356, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 287, + 190, + 356, + 201 + ], + "spans": [ + { + "bbox": [ + 287, + 190, + 356, + 201 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 212, + 435, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 212, + 435, + 220 + ], + "spans": [ + { + "bbox": [ + 113, + 212, + 435, + 220 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: In this problem, I'm given a drawing of a village roadmap made by Jan. According to the problem:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 221, + 316, + 243 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 114, + 221, + 264, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 221, + 264, + 225 + ], + "spans": [ + { + "bbox": [ + 114, + 221, + 264, + 225 + ], + "type": "text", + "content": "1. The relative positions of houses and street crossings are correct" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 225, + 316, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 225, + 316, + 232 + ], + "spans": [ + { + "bbox": [ + 114, + 225, + 316, + 232 + ], + "type": "text", + "content": "2. Three of the roads are actually straight in reality, but only Qurwik street is not straight" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 232, + 274, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 232, + 274, + 237 + ], + "spans": [ + { + "bbox": [ + 114, + 232, + 274, + 237 + ], + "type": "text", + "content": "3. The houses are labeled with the names: Amy, Ben, Carol, and David" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 237, + 228, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 237, + 228, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 237, + 228, + 243 + ], + "type": "text", + "content": "4. I need to determine who lives on Qurwik street" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 248, + 402, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 248, + 402, + 256 + ], + "spans": [ + { + "bbox": [ + 113, + 248, + 402, + 256 + ], + "type": "text", + "content": "Looking at the image, I can see a diagram with four houses labeled with the names and several curved roads connecting them." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 261, + 465, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 261, + 465, + 269 + ], + "spans": [ + { + "bbox": [ + 113, + 261, + 465, + 269 + ], + "type": "text", + "content": "Since three roads are actually straight and only Qurwik street is not, I need to identify which road would most likely not be straight based on the diagram." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 274, + 204, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 274, + 204, + 280 + ], + "spans": [ + { + "bbox": [ + 113, + 274, + 204, + 280 + ], + "type": "text", + "content": "The roads in the diagram appear to be:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 281, + 237, + 312 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 114, + 281, + 227, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 281, + 227, + 286 + ], + "spans": [ + { + "bbox": [ + 114, + 281, + 227, + 286 + ], + "type": "text", + "content": "1. A road connecting Amy's house to the network" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 287, + 229, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 287, + 229, + 293 + ], + "spans": [ + { + "bbox": [ + 114, + 287, + 229, + 293 + ], + "type": "text", + "content": "2. A road connecting Carol's house to the network" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 293, + 228, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 293, + 228, + 298 + ], + "spans": [ + { + "bbox": [ + 114, + 293, + 228, + 298 + ], + "type": "text", + "content": "3. A road connecting David's house to the network" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 299, + 225, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 299, + 225, + 304 + ], + "spans": [ + { + "bbox": [ + 114, + 299, + 225, + 304 + ], + "type": "text", + "content": "4. A road connecting Ben's house to the network" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 305, + 237, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 305, + 237, + 312 + ], + "spans": [ + { + "bbox": [ + 114, + 305, + 237, + 312 + ], + "type": "text", + "content": "5. Various segments connecting these houses together" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 317, + 496, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 317, + 496, + 330 + ], + "spans": [ + { + "bbox": [ + 113, + 317, + 496, + 330 + ], + "type": "text", + "content": "Since we're told that only one road is not straight, and we see multiple curved segments in the diagram, it must be that these curved segments represent the same road - Qurwik street." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 335, + 356, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 335, + 356, + 342 + ], + "spans": [ + { + "bbox": [ + 113, + 335, + 356, + 342 + ], + "type": "text", + "content": "Looking carefully at the diagram, I need to determine which person's house is connected to Qurwik street." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 347, + 483, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 347, + 483, + 355 + ], + "spans": [ + { + "bbox": [ + 113, + 347, + 483, + 355 + ], + "type": "text", + "content": "The diagram shows curved roads connecting different houses. Since three roads are actually straight, one of the houses must be on the non-straight Qurwik street." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 360, + 496, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 360, + 496, + 373 + ], + "spans": [ + { + "bbox": [ + 113, + 360, + 496, + 373 + ], + "type": "text", + "content": "Looking at the connectivity in the diagram, it appears that there's a curved path that runs between multiple houses. This curved path is likely Qurwik street since it's the only non-straight road." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 378, + 496, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 378, + 496, + 392 + ], + "spans": [ + { + "bbox": [ + 113, + 378, + 496, + 392 + ], + "type": "text", + "content": "Based on the diagram, it looks like Ben's house is at one end of what appears to be a major curved road. David's house seems to be connected to this same curved road. Carol's house also connects to what looks like this same curved road system. Amy's house seems to be connected to part of this curved network as well." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 396, + 496, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 396, + 496, + 410 + ], + "spans": [ + { + "bbox": [ + 113, + 396, + 496, + 410 + ], + "type": "text", + "content": "This is a bit ambiguous from the diagram alone, as I can't clearly tell which specific segment is Qurwik street versus other roads that might be straight in reality despite being drawn curved." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 415, + 496, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 415, + 496, + 429 + ], + "spans": [ + { + "bbox": [ + 113, + 415, + 496, + 429 + ], + "type": "text", + "content": "Without more specific information about which road is Qurwik street, I would have to make an inference based on which house seems most connected to curved paths in the diagram." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 434, + 257, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 434, + 257, + 440 + ], + "spans": [ + { + "bbox": [ + 113, + 434, + 257, + 440 + ], + "type": "text", + "content": "Looking at the houses and their positions relative to the roads:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 441, + 263, + 465 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 114, + 441, + 263, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 441, + 263, + 446 + ], + "spans": [ + { + "bbox": [ + 114, + 441, + 263, + 446 + ], + "type": "text", + "content": "- Amy's house appears to be connected to a curved road segment" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 447, + 260, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 447, + 260, + 453 + ], + "spans": [ + { + "bbox": [ + 114, + 447, + 260, + 453 + ], + "type": "text", + "content": "- Ben's house appears to be at the end of a curved road segment" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 454, + 228, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 454, + 228, + 460 + ], + "spans": [ + { + "bbox": [ + 114, + 454, + 228, + 460 + ], + "type": "text", + "content": "- Carol's house connects to a curved road segment" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 460, + 238, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 460, + 238, + 465 + ], + "spans": [ + { + "bbox": [ + 114, + 460, + 238, + 465 + ], + "type": "text", + "content": "- David's house connects to multiple curved segments" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 471, + 496, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 471, + 496, + 484 + ], + "spans": [ + { + "bbox": [ + 113, + 471, + 496, + 484 + ], + "type": "text", + "content": "Based on the diagram, David's house seems to be most central to the curved road network, with multiple curved segments connecting to it. This suggests David might be on Qurwik street." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 489, + 356, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 489, + 356, + 495 + ], + "spans": [ + { + "bbox": [ + 113, + 489, + 356, + 495 + ], + "type": "text", + "content": "However, this is somewhat ambiguous without more clear labeling of which road is which in the diagram." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 496, + 151, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 496, + 151, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 496, + 151, + 502 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 502, + 206, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 502, + 206, + 509 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 206, + 509 + ], + "type": "text", + "content": "Solving the Village Roadmap Problem" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 514, + 408, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 514, + 408, + 521 + ], + "spans": [ + { + "bbox": [ + 113, + 514, + 408, + 521 + ], + "type": "text", + "content": "I need to determine who lives on Qurwik street, given that three roads are actually straight and only Qurwik street is not straight." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 113, + 526, + 494, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 526, + 494, + 540 + ], + "spans": [ + { + "bbox": [ + 113, + 526, + 494, + 540 + ], + "type": "text", + "content": "Looking at the image, I can see houses belonging to Amy, Ben, Carol, and David connected by various curved roads. Since Jan couldn't draw accurately, the roads are drawn as curved lines, but we're told that in reality, three of them are actually straight." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 544, + 190, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 544, + 190, + 552 + ], + "spans": [ + { + "bbox": [ + 113, + 544, + 190, + 552 + ], + "type": "text", + "content": "Analyzing the Road Network" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 558, + 172, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 558, + 172, + 563 + ], + "spans": [ + { + "bbox": [ + 113, + 558, + 172, + 563 + ], + "type": "text", + "content": "In the diagram, I can see:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 564, + 238, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 564, + 238, + 570 + ], + "spans": [ + { + "bbox": [ + 114, + 564, + 238, + 570 + ], + "type": "text", + "content": "- Four houses connected by a network of curved roads" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 571, + 216, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 571, + 216, + 575 + ], + "spans": [ + { + "bbox": [ + 114, + 571, + 216, + 575 + ], + "type": "text", + "content": "- Multiple intersections between these roads" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 576, + 217, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 576, + 217, + 582 + ], + "spans": [ + { + "bbox": [ + 114, + 576, + 217, + 582 + ], + "type": "text", + "content": "Each house is connected to at least one road" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 588, + 182, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 588, + 182, + 595 + ], + "spans": [ + { + "bbox": [ + 114, + 588, + 182, + 595 + ], + "type": "text", + "content": "Identifying Qurwik Street" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 113, + 600, + 376, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 600, + 376, + 608 + ], + "spans": [ + { + 
"bbox": [ + 113, + 600, + 376, + 608 + ], + "type": "text", + "content": "To determine which road is Qurwik street (the only non-straight road), I need to analyze the pattern of connections." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 113, + 613, + 496, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 613, + 496, + 632 + ], + "spans": [ + { + "bbox": [ + 113, + 613, + 496, + 632 + ], + "type": "text", + "content": "Looking at the diagram carefully, I notice that David's house appears to be connected to a road that makes a distinctive curved path. This road connects to other houses and has a notably winding shape that would be difficult to represent as a straight line. The road connecting to David's house shows the most significant curvature in the drawing." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 113, + 637, + 496, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 637, + 496, + 651 + ], + "spans": [ + { + "bbox": [ + 113, + 637, + 496, + 651 + ], + "type": "text", + "content": "Since the problem states that three roads are actually straight and only Qurwik street is not, and David's house connects to what appears to be the most significantly curved road in the diagram, it's reasonable to conclude that David lives on Qurwik street." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 113, + 655, + 140, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 655, + 140, + 662 + ], + "spans": [ + { + "bbox": [ + 113, + 655, + 140, + 662 + ], + "type": "text", + "content": "Answer: D" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 670, + 249, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 670, + 249, + 680 + ], + "spans": [ + { + "bbox": [ + 113, + 670, + 249, + 680 + ], + "type": "text", + "content": "Error Category: Understanding Error" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 113, + 681, + 496, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 681, + 496, + 708 + ], + "spans": [ + { + "bbox": [ + 113, + 681, + 496, + 708 + ], + "type": "text", + "content": "Error Reason: The model misinterprets the condition that only one road (Qurwik street) is not straight, while the other three are. It misunderstands the roads that seem to be curved in the image are not all really curved." 
+ } + ] + } + ], + "index": 52 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 54 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 260, + 88, + 345, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 88, + 345, + 102 + ], + "spans": [ + { + "bbox": [ + 260, + 88, + 345, + 102 + ], + "type": "text", + "content": "Deductive (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "60adf99283738943138682dd44b10b03153727886a6da65dcb8737ff516f26e7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 110, + 112, + 232, + 205 + ], + "blocks": [ + { + "bbox": [ + 110, + 112, + 232, + 205 + ], + "lines": [ + { + "bbox": [ + 110, + 112, + 232, + 205 + ], + "spans": [ + { + "bbox": [ + 110, + 112, + 232, + 205 + ], + "type": "image", + "image_path": "61b015d2546abf578760e8cc3932dd392875f94640dc490a388206fc830df894.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "lines": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "spans": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "type": "text", + "content": "Figure 31: A sample correct case of Deductive Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 236, + 114, + 491, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 114, + 491, + 181 + ], + "spans": [ + { + "bbox": [ + 236, + 114, + 491, + 181 + ], + "type": "text", + "content": "Question: Diana draws a rectangle made up of twelve squares onto a piece of squared paper. Some of the squares are coloured in black. She writes the number of adjacent black squares into every white square. The diagram shows an example of such a rectangle. Now she does the same with a rectangle made up of 2018 squares. What is the biggest number that she can obtain as the sum of all numbers in the white squares?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 238, + 182, + 392, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 182, + 392, + 191 + ], + "spans": [ + { + "bbox": [ + 238, + 182, + 392, + 191 + ], + "type": "text", + "content": "Options: A: 2800. B: 2950. C: 3025. 
D: 3100" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 238, + 198, + 307, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 198, + 307, + 209 + ], + "spans": [ + { + "bbox": [ + 238, + 198, + 307, + 209 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 217, + 496, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 217, + 496, + 232 + ], + "spans": [ + { + "bbox": [ + 113, + 217, + 496, + 232 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: In this problem, Diana has a rectangle made up of squares, some of which are colored black. Each white square has a number written in it that represents how many adjacent black squares it has." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 236, + 165, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 236, + 165, + 243 + ], + "spans": [ + { + "bbox": [ + 113, + 236, + 165, + 243 + ], + "type": "text", + "content": "In the example given:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 243, + 187, + 262 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 114, + 243, + 181, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 243, + 181, + 250 + ], + "spans": [ + { + "bbox": [ + 114, + 243, + 181, + 250 + ], + "type": "text", + "content": "- The first row: 1, [black], 2, 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 250, + 187, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 250, + 187, + 255 + ], + "spans": [ + { + "bbox": [ + 114, + 250, + 187, + 255 + ], + "type": "text", + "content": "- The second row: 0, 3, [black], 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 255, + 183, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 255, + 183, + 262 + ], + "spans": [ + { + "bbox": [ + 114, + 255, + 183, + 262 + ], + "type": "text", + "content": "- The third row: 1, [black], 2, 1" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 267, + 365, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 267, + 365, + 275 + ], + "spans": [ + { + "bbox": [ + 113, + 267, + 365, + 275 + ], + "type": "text", + "content": "The rectangle has 12 squares total, with 4 of them being black (marked as [black]) and 8 white (with numbers)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 279, + 455, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 279, + 455, + 287 + ], + "spans": [ + { + "bbox": [ + 113, + 279, + 455, + 287 + ], + "type": "text", + "content": "Now, Diana does the same with a rectangle made up of 2018 squares. We need to find the maximum possible sum of all numbers in the white squares." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 292, + 495, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 292, + 495, + 305 + ], + "spans": [ + { + "bbox": [ + 113, + 292, + 495, + 305 + ], + "type": "text", + "content": "Let's think about how to maximize this sum. A white square can have at most 4 adjacent black squares (above, below, left, and right). So each white square can contribute at most 4 to the sum." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "spans": [ + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "type": "text", + "content": "Let's consider the example given, which is a " + }, + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "type": "inline_equation", + "content": "3 \\times 4" + }, + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "type": "text", + "content": " grid. There are 4 black squares and 8 white squares. The sum of numbers in white squares is " + }, + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "type": "inline_equation", + "content": "1 + 2 + 1 + 0 + 3 + 1 + 1 + 2 + 1 = 12" + }, + { + "bbox": [ + 113, + 310, + 495, + 323 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 328, + 413, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 328, + 413, + 337 + ], + "spans": [ + { + "bbox": [ + 113, + 328, + 413, + 337 + ], + "type": "text", + "content": "I think I need to determine how to maximize the sum by choosing the right number of black squares and arranging them optimally." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 341, + 495, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 341, + 495, + 355 + ], + "spans": [ + { + "bbox": [ + 113, + 341, + 495, + 355 + ], + "type": "text", + "content": "One approach is to place the black squares in a pattern such that each white square has as many adjacent black squares as possible. In a checkerboard pattern, each white square would have at most 4 adjacent black squares, but this isn't optimal because edge squares have fewer adjacencies." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 359, + 495, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 359, + 495, + 373 + ], + "spans": [ + { + "bbox": [ + 113, + 359, + 495, + 373 + ], + "type": "text", + "content": "Act, t a h n ." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 378, + 495, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 378, + 495, + 392 + ], + "spans": [ + { + "bbox": [ + 113, + 378, + 495, + 392 + ], + "type": "text", + "content": "For a bipartite graph, the maximum cut is achieved by putting all nodes of one type on one side and all nodes of the other type on the other side. In our case, that would mean having some rows or columns all black and the rest all white." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "spans": [ + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "type": "text", + "content": "Let's say our grid is " + }, + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "type": "inline_equation", + "content": "\\mathbf{r} \\times \\mathbf{c}" + }, + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "type": "text", + "content": ", and we color some rows all black and the rest all white. 
If we color k rows all black, then the number of adjacencies between black and white squares would be " + }, + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "type": "inline_equation", + "content": "\\mathrm{k} \\times \\mathrm{c} \\times 2" + }, + { + "bbox": [ + 113, + 396, + 495, + 416 + ], + "type": "text", + "content": " (because each row has c squares, and each black square in a row can have at most 2 adjacencies with white squares, one above and one below)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 421, + 495, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 421, + 495, + 441 + ], + "spans": [ + { + "bbox": [ + 113, + 421, + 495, + 441 + ], + "type": "text", + "content": "Let's consider a more specific pattern. Imagine our grid is " + }, + { + "bbox": [ + 113, + 421, + 495, + 441 + ], + "type": "inline_equation", + "content": "2 \\times 1009" + }, + { + "bbox": [ + 113, + 421, + 495, + 441 + ], + "type": "text", + "content": " (so it has 2018 squares total). We color the bottom row all black and the top row all white. Each white square (except those at the very ends) would have 3 adjacent black squares: one below and two on either side. The white squares at the ends would have 2 adjacent black squares." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 445, + 455, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 445, + 455, + 453 + ], + "spans": [ + { + "bbox": [ + 113, + 445, + 455, + 453 + ], + "type": "text", + "content": "So, we'd have 1007 white squares with 3 adjacencies each and 2 white squares with 2 adjacencies each. The total sum would be " + }, + { + "bbox": [ + 113, + 445, + 455, + 453 + ], + "type": "inline_equation", + "content": "1007 \\times 3 + 2 \\times 2 = 3025" + }, + { + "bbox": [ + 113, + 445, + 455, + 453 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 453, + 152, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 453, + 152, + 460 + ], + "spans": [ + { + "bbox": [ + 114, + 453, + 152, + 460 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 459, + 252, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 459, + 252, + 466 + ], + "spans": [ + { + "bbox": [ + 114, + 459, + 252, + 466 + ], + "type": "text", + "content": "Solving the Maximum Sum Problem for Diana's Rectangle" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 471, + 406, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 471, + 406, + 479 + ], + "spans": [ + { + "bbox": [ + 113, + 471, + 406, + 479 + ], + "type": "text", + "content": "I need to find the maximum possible sum of the numbers in white squares when Diana creates a rectangle made of 2018 squares." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 483, + 187, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 483, + 187, + 489 + ], + "spans": [ + { + "bbox": [ + 113, + 483, + 187, + 489 + ], + "type": "text", + "content": "## Understanding the Problem" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 489, + 318, + 521 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 114, + 489, + 248, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 489, + 248, + 495 + ], + "spans": [ + { + "bbox": [ + 114, + 489, + 248, + 495 + ], + "type": "text", + "content": "- Diana draws a rectangle with some squares colored black." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 495, + 318, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 495, + 318, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 495, + 318, + 502 + ], + "type": "text", + "content": "- Each white square contains a number indicating how many adjacent black squares it has" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 502, + 242, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 502, + 242, + 509 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 242, + 509 + ], + "type": "text", + "content": "- Adjacent means sharing an edge (up, down, left, right)" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 509, + 317, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 509, + 317, + 515 + ], + "spans": [ + { + "bbox": [ + 114, + 509, + 317, + 515 + ], + "type": "text", + "content": "- Maximum possible number for any white square is 4 (if all 4 adjacent squares are black)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 515, + 273, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 515, + 273, + 521 + ], + "spans": [ + { + "bbox": [ + 114, + 515, + 273, + 521 + ], + "type": "text", + "content": "- We need to find the maximum possible sum across all white squares" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 526, + 142, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 526, + 142, + 533 + ], + "spans": [ + { + "bbox": [ + 113, + 526, + 142, + 533 + ], + "type": "text", + "content": "Strategy" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 533, + 223, + 552 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 114, + 533, + 216, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 533, + 216, + 540 + ], + "spans": [ + { + "bbox": [ + 114, + 533, + 216, + 540 + ], + "type": "text", + "content": "To maximize the sum, I'll need to determine:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 540, + 223, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 540, + 223, + 545 + ], + "spans": [ + { + "bbox": [ + 114, + 540, + 223, + 545 + ], + "type": "text", + "content": "1. How many squares should be black vs. white" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 545, + 216, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 545, + 216, + 552 + ], + "spans": [ + { + "bbox": [ + 114, + 545, + 216, + 552 + ], + "type": "text", + "content": "2. 
The optimal arrangement of these squares" + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 557, + 143, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 557, + 143, + 563 + ], + "spans": [ + { + "bbox": [ + 113, + 557, + 143, + 563 + ], + "type": "text", + "content": "Analysis" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 563, + 300, + 589 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 114, + 563, + 209, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 563, + 209, + 571 + ], + "spans": [ + { + "bbox": [ + 114, + 563, + 209, + 571 + ], + "type": "text", + "content": "Let's consider what makes the sum large:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 571, + 241, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 571, + 241, + 576 + ], + "spans": [ + { + "bbox": [ + 114, + 571, + 241, + 576 + ], + "type": "text", + "content": "- Each white square can contribute at most 4 to the sum" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 576, + 274, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 576, + 274, + 582 + ], + "spans": [ + { + "bbox": [ + 114, + 576, + 274, + 582 + ], + "type": "text", + "content": "- Each black square can contribute to at most 4 adjacent white squares" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 582, + 300, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 582, + 300, + 589 + ], + "spans": [ + { + "bbox": [ + 114, + 582, + 300, + 589 + ], + "type": "text", + "content": "- The total represents the number of adjacencies between black and white squares" + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 594, + 440, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 594, + 440, + 601 + ], + "spans": [ + { + "bbox": [ + 113, + 594, + 440, + 601 + ], + "type": "text", + "content": "For a 2018-square rectangle, the optimal shape matters. Let's try a " + }, + { + "bbox": [ + 113, + 594, + 440, + 601 + ], + "type": "inline_equation", + "content": "43 \\times 47" + }, + { + "bbox": [ + 113, + 594, + 440, + 601 + ], + "type": "text", + "content": " rectangle (43×47=2021, which is close enough to 2018 for our analysis)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 113, + 606, + 495, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 606, + 495, + 620 + ], + "spans": [ + { + "bbox": [ + 113, + 606, + 495, + 620 + ], + "type": "text", + "content": "If we create a checkerboard pattern, approximately half the squares will be black and half white. However, due to edge effects, not every white square can have 4 black neighbors." 
+ } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 113, + 625, + 183, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 625, + 183, + 632 + ], + "spans": [ + { + "bbox": [ + 113, + 625, + 183, + 632 + ], + "type": "text", + "content": "Calculating the Maximum" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 632, + 274, + 651 + ], + "type": "list", + "angle": 0, + "index": 51, + "blocks": [ + { + "bbox": [ + 114, + 632, + 274, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 632, + 274, + 638 + ], + "spans": [ + { + "bbox": [ + 114, + 632, + 274, + 638 + ], + "type": "text", + "content": "For a rectangle with dimensions " + }, + { + "bbox": [ + 114, + 632, + 274, + 638 + ], + "type": "inline_equation", + "content": "r \\times c" + }, + { + "bbox": [ + 114, + 632, + 274, + 638 + ], + "type": "text", + "content": ", the maximum sum occurs when:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 638, + 234, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 638, + 234, + 644 + ], + "spans": [ + { + "bbox": [ + 114, + 638, + 234, + 644 + ], + "type": "text", + "content": "- Black squares are placed in a checkerboard pattern" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 644, + 274, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 644, + 274, + 651 + ], + "spans": [ + { + "bbox": [ + 114, + 644, + 274, + 651 + ], + "type": "text", + "content": "- The rectangle is as close to square as possible (minimizing perimeter)" + } + ] + } + ], + "index": 50 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 655, + 495, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 655, + 495, + 669 + ], + "spans": [ + { + "bbox": [ + 113, + 655, + 495, + 669 + ], + "type": "text", + "content": "For a nearly square grid with approximately 1009 white squares and 1009 black squares, each interior white square contributes 4, while edge white squares contribute less." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 113, + 674, + 246, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 674, + 246, + 687 + ], + "spans": [ + { + "bbox": [ + 113, + 674, + 246, + 687 + ], + "type": "text", + "content": "From a theoretical analysis, the maximum sum would be: " + }, + { + "bbox": [ + 113, + 674, + 246, + 687 + ], + "type": "inline_equation", + "content": "-55 \\times 55 = 3025" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 692, + 197, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 692, + 197, + 700 + ], + "spans": [ + { + "bbox": [ + 114, + 692, + 197, + 700 + ], + "type": "text", + "content": "This corresponds to option (C) 3025." 
+ } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 704, + 140, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 704, + 140, + 711 + ], + "spans": [ + { + "bbox": [ + 114, + 704, + 140, + 711 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 55 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 57 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 263, + 89, + 341, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 89, + 341, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 89, + 341, + 102 + ], + "type": "text", + "content": "Inductive (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "cbd2f7e3140808a83ca69a0a2e1610dd9a654841f60dae20b2433020e2adb7e1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 113, + 111, + 140, + 137 + ], + "blocks": [ + { + "bbox": [ + 113, + 111, + 140, + 137 + ], + "lines": [ + { + "bbox": [ + 113, + 111, + 140, + 137 + ], + "spans": [ + { + "bbox": [ + 113, + 111, + 140, + 137 + ], + "type": "image", + "image_path": "f9ae1628f4f27c67e42a309bcf565c5199ee7d184e1863bf889224ec5abf6921.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 723, + 465, + 736 + ], + "lines": [ + { + "bbox": [ + 145, + 723, + 465, + 736 + ], + "spans": [ + { + "bbox": [ + 145, + 723, + 465, + 736 + ], + "type": "text", + "content": "Figure 32: A sample error case of Inductive Reasoning (difficulty: Easy)." + } + ] + } + ], + "index": 75, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 125, + 139, + 129, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 139, + 129, + 144 + ], + "spans": [ + { + "bbox": [ + 125, + 139, + 129, + 144 + ], + "type": "text", + "content": "A." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 159, + 112, + 184, + 137 + ], + "blocks": [ + { + "bbox": [ + 159, + 112, + 184, + 137 + ], + "lines": [ + { + "bbox": [ + 159, + 112, + 184, + 137 + ], + "spans": [ + { + "bbox": [ + 159, + 112, + 184, + 137 + ], + "type": "image", + "image_path": "094995f7fd6661fa2cf01e8206d18ba8f31705d31dd9700b64a1e8c398d45ed3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 169, + 139, + 174, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 139, + 174, + 145 + ], + "spans": [ + { + "bbox": [ + 169, + 139, + 174, + 145 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 203, + 112, + 228, + 137 + ], + "blocks": [ + { + "bbox": [ + 203, + 112, + 228, + 137 + ], + "lines": [ + { + "bbox": [ + 203, + 112, + 228, + 137 + ], + "spans": [ + { + "bbox": [ + 203, + 112, + 228, + 137 + ], + "type": "image", + "image_path": "4d6f1428c39c2fafedb135682adbf4b8de0cd898e86e0609786958190341c596.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 212, + 139, + 217, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 139, + 217, + 145 + ], + "spans": [ + { + "bbox": [ + 212, + 139, + 217, + 145 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 249, + 112, + 275, + 137 + ], + "blocks": [ + { + "bbox": [ + 249, + 112, + 275, + 137 + ], + "lines": [ + { + "bbox": [ + 249, + 112, + 275, + 137 + ], + "spans": [ + { + "bbox": [ + 249, + 112, + 275, + 137 + ], + "type": "image", + "image_path": "5ebbe461b93a2cbe955c72fb377c52e5c4b6a02d084792aa8be5cc43571a02a0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 257, + 139, + 263, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 139, + 263, + 145 + ], + "spans": [ + { + "bbox": [ + 257, + 139, + 263, + 145 + ], + "type": "text", + "content": "D" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 282, + 112, + 488, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 112, + 488, + 133 + ], + "spans": [ + { + "bbox": [ + 282, + 112, + 488, + 133 + ], + "type": "text", + "content": "Question: Find one figure from the four below that displays a different pattern from the other three." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 282, + 139, + 351, + 150 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 139, + 351, + 150 + ], + "spans": [ + { + "bbox": [ + 282, + 139, + 351, + 150 + ], + "type": "text", + "content": "Ground Truth: (D)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 113, + 156, + 430, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 156, + 430, + 165 + ], + "spans": [ + { + "bbox": [ + 113, + 156, + 430, + 165 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze each figure to identify any patterns or differences among them:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 170, + 339, + 200 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 113, + 170, + 339, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 170, + 339, + 178 + ], + "spans": [ + { + "bbox": [ + 113, + 170, + 339, + 178 + ], + "type": "text", + "content": "Figure A: This is a hexagon with two curved lines inside and a black dot in one corner." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 178, + 339, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 178, + 339, + 186 + ], + "spans": [ + { + "bbox": [ + 114, + 178, + 339, + 186 + ], + "type": "text", + "content": "Figure B: This is a hexagon with two curved lines inside and a black dot in one corner." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 186, + 339, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 186, + 339, + 193 + ], + "spans": [ + { + "bbox": [ + 114, + 186, + 339, + 193 + ], + "type": "text", + "content": "Figure C: This is a hexagon with two curved lines inside and a black dot in one corner." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 193, + 339, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 193, + 339, + 200 + ], + "spans": [ + { + "bbox": [ + 114, + 193, + 339, + 200 + ], + "type": "text", + "content": "Figure D: This is a hexagon with two curved lines inside and a black dot in one corner." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 205, + 496, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 205, + 496, + 221 + ], + "spans": [ + { + "bbox": [ + 113, + 205, + 496, + 221 + ], + "type": "text", + "content": "All figures appear to have the same basic elements: a hexagon outline, two curved lines inside, and a black dot. Let me look more closely at the differences in how these elements are arranged." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 227, + 188, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 227, + 188, + 235 + ], + "spans": [ + { + "bbox": [ + 113, + 227, + 188, + 235 + ], + "type": "text", + "content": "Looking at the curved lines:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 235, + 373, + 263 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 114, + 235, + 373, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 235, + 373, + 242 + ], + "spans": [ + { + "bbox": [ + 114, + 235, + 373, + 242 + ], + "type": "text", + "content": "- Figure A: The two curved lines appear to form a single continuous curve that divides the hexagon." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 242, + 331, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 242, + 331, + 249 + ], + "spans": [ + { + "bbox": [ + 114, + 242, + 331, + 249 + ], + "type": "text", + "content": "- Figure B: The two curved lines appear to intersect or overlap within the hexagon." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 249, + 373, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 249, + 373, + 256 + ], + "spans": [ + { + "bbox": [ + 114, + 249, + 373, + 256 + ], + "type": "text", + "content": "- Figure C: The two curved lines appear to form a single continuous curve that divides the hexagon." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 256, + 373, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 256, + 373, + 263 + ], + "spans": [ + { + "bbox": [ + 114, + 256, + 373, + 263 + ], + "type": "text", + "content": "- Figure D: The two curved lines appear to form a single continuous curve that divides the hexagon." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 269, + 204, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 269, + 204, + 277 + ], + "spans": [ + { + "bbox": [ + 113, + 269, + 204, + 277 + ], + "type": "text", + "content": "Looking at the position of the dot:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 277, + 247, + 305 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 114, + 277, + 246, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 277, + 246, + 285 + ], + "spans": [ + { + "bbox": [ + 114, + 277, + 246, + 285 + ], + "type": "text", + "content": "- Figure A: The dot is in one corner of the hexagon." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 285, + 246, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 285, + 246, + 292 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 246, + 292 + ], + "type": "text", + "content": "- Figure B: The dot is in one corner of the hexagon." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 292, + 246, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 292, + 246, + 298 + ], + "spans": [ + { + "bbox": [ + 114, + 292, + 246, + 298 + ], + "type": "text", + "content": "- Figure C: The dot is in one corner of the hexagon." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 298, + 247, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 298, + 247, + 305 + ], + "spans": [ + { + "bbox": [ + 114, + 298, + 247, + 305 + ], + "type": "text", + "content": "- Figure D: The dot is in one corner of the hexagon." 
+ } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 312, + 281, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 312, + 281, + 319 + ], + "spans": [ + { + "bbox": [ + 113, + 312, + 281, + 319 + ], + "type": "text", + "content": "Let me look even more closely at the arrangement of the curves:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 319, + 377, + 347 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 114, + 319, + 353, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 319, + 353, + 326 + ], + "spans": [ + { + "bbox": [ + 114, + 319, + 353, + 326 + ], + "type": "text", + "content": "- Figure A: The curves divide the hexagon into sections, with one section containing the dot." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 327, + 377, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 327, + 377, + 334 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 377, + 334 + ], + "type": "text", + "content": "- Figure B: The curves create a pattern where they seem to overlap or create a more complex division." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 334, + 353, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 334, + 353, + 340 + ], + "spans": [ + { + "bbox": [ + 114, + 334, + 353, + 340 + ], + "type": "text", + "content": "- Figure C: The curves divide the hexagon into sections, with one section containing the dot." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 340, + 353, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 340, + 353, + 347 + ], + "spans": [ + { + "bbox": [ + 114, + 340, + 353, + 347 + ], + "type": "text", + "content": "- Figure D: The curves divide the hexagon into sections, with one section containing the dot." + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 354, + 496, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 354, + 496, + 376 + ], + "spans": [ + { + "bbox": [ + 113, + 354, + 496, + 376 + ], + "type": "text", + "content": "Based on this closer examination, Figure B appears to have a different pattern in how the curved lines are arranged within the hexagon compared to the other three figures. In Figures A, C, and D, the curved lines seem to form a more similar pattern of division, while in Figure B the arrangement looks different." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 381, + 330, + 404 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 113, + 381, + 330, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 381, + 330, + 390 + ], + "spans": [ + { + "bbox": [ + 113, + 381, + 330, + 390 + ], + "type": "text", + "content": "Therefore, Figure B is the one that displays a different pattern from the other three." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 390, + 156, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 390, + 156, + 397 + ], + "spans": [ + { + "bbox": [ + 114, + 390, + 156, + 397 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 397, + 219, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 397, + 219, + 404 + ], + "spans": [ + { + "bbox": [ + 114, + 397, + 219, + 404 + ], + "type": "text", + "content": "# Pattern Recognition Problem Analysis" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 410, + 353, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 410, + 353, + 418 + ], + "spans": [ + { + "bbox": [ + 113, + 410, + 353, + 418 + ], + "type": "text", + "content": "I need to analyze each figure and identify which one has a different pattern from the others." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 424, + 184, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 424, + 184, + 432 + ], + "spans": [ + { + "bbox": [ + 113, + 424, + 184, + 432 + ], + "type": "text", + "content": "Looking at all four figures:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 432, + 281, + 460 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 114, + 432, + 162, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 432, + 162, + 439 + ], + "spans": [ + { + "bbox": [ + 114, + 432, + 162, + 439 + ], + "type": "text", + "content": "- All are hexagons" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 439, + 197, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 439, + 197, + 445 + ], + "spans": [ + { + "bbox": [ + 114, + 439, + 197, + 445 + ], + "type": "text", + "content": "- All contain curved lines inside" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 445, + 187, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 445, + 187, + 453 + ], + "spans": [ + { + "bbox": [ + 114, + 445, + 187, + 453 + ], + "type": "text", + "content": "- All have a single black dot" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 453, + 281, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 453, + 281, + 460 + ], + "spans": [ + { + "bbox": [ + 114, + 453, + 281, + 460 + ], + "type": "text", + "content": "- All have two curved lines creating sections within the hexagon" + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 466, + 306, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 466, + 306, + 475 + ], + "spans": [ + { + "bbox": [ + 113, + 466, + 306, + 475 + ], + "type": "text", + "content": "Let me examine the specific arrangement of these elements in each figure:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 481, + 148, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 481, + 148, + 487 + ], + "spans": [ + { + "bbox": [ + 114, + 481, + 148, + 487 + ], + "type": "text", + "content": "## Figure A:" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 488, + 299, + 510 + ], + "type": "list", + "angle": 0, + "index": 55, + "blocks": [ + { + "bbox": [ + 114, + 488, + 214, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 488, + 214, + 
495 + ], + "spans": [ + { + "bbox": [ + 114, + 488, + 214, + 495 + ], + "type": "text", + "content": "- Two curved lines that create sections" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 495, + 249, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 495, + 249, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 495, + 249, + 502 + ], + "type": "text", + "content": "- The black dot is positioned in the lower left corner" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 502, + 299, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 502, + 299, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 299, + 510 + ], + "type": "text", + "content": "- The curves form a pattern that divides the hexagon into three sections" + } + ] + } + ], + "index": 54 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 516, + 147, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 516, + 147, + 523 + ], + "spans": [ + { + "bbox": [ + 114, + 516, + 147, + 523 + ], + "type": "text", + "content": "Figure B:" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 523, + 313, + 545 + ], + "type": "list", + "angle": 0, + "index": 60, + "blocks": [ + { + "bbox": [ + 114, + 523, + 253, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 523, + 253, + 530 + ], + "spans": [ + { + "bbox": [ + 114, + 523, + 253, + 530 + ], + "type": "text", + "content": "- Two curved lines that appear to overlap or intersect" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 530, + 255, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 530, + 255, + 536 + ], + "spans": [ + { + "bbox": [ + 114, + 530, + 255, + 536 + ], + "type": "text", + "content": "- The black dot is positioned in the lower right corner" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 537, + 313, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 537, + 313, + 545 + ], + "spans": [ + { + "bbox": [ + 114, + 537, + 313, + 545 + ], + "type": "text", + "content": "- The curves create a different division pattern compared to the other figures" + } + ] + } + ], + "index": 59 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 551, + 148, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 551, + 148, + 558 + ], + "spans": [ + { + "bbox": [ + 114, + 551, + 148, + 558 + ], + "type": "text", + "content": "Figure C:" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 559, + 250, + 580 + ], + "type": "list", + "angle": 0, + "index": 65, + "blocks": [ + { + "bbox": [ + 114, + 559, + 214, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 559, + 214, + 565 + ], + "spans": [ + { + "bbox": [ + 114, + 559, + 214, + 565 + ], + "type": "text", + "content": "- Two curved lines that create sections" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 566, + 249, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 566, + 249, + 572 + ], + "spans": [ + { + "bbox": [ + 114, + 566, + 249, + 572 + ], + "type": "text", + "content": "- The black dot is positioned in the lower left corner" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 572, + 250, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 572, + 250, + 580 + ], + "spans": [ + { + "bbox": [ + 114, + 572, + 250, + 580 + ], + "type": "text", + 
"content": "- The pattern of curve division is similar to Figure A" + } + ] + } + ], + "index": 64 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 586, + 148, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 586, + 148, + 594 + ], + "spans": [ + { + "bbox": [ + 114, + 586, + 148, + 594 + ], + "type": "text", + "content": "Figure D:" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 114, + 594, + 269, + 615 + ], + "type": "list", + "angle": 0, + "index": 70, + "blocks": [ + { + "bbox": [ + 114, + 594, + 214, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 594, + 214, + 601 + ], + "spans": [ + { + "bbox": [ + 114, + 594, + 214, + 601 + ], + "type": "text", + "content": "- Two curved lines that create sections" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 114, + 601, + 253, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 601, + 253, + 608 + ], + "spans": [ + { + "bbox": [ + 114, + 601, + 253, + 608 + ], + "type": "text", + "content": "- The black dot is positioned in the lower right corner" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 114, + 608, + 269, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 608, + 269, + 615 + ], + "spans": [ + { + "bbox": [ + 114, + 608, + 269, + 615 + ], + "type": "text", + "content": "- The pattern of curve division is similar to Figures A and C." + } + ] + } + ], + "index": 69 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 621, + 496, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 621, + 496, + 637 + ], + "spans": [ + { + "bbox": [ + 113, + 621, + 496, + 637 + ], + "type": "text", + "content": "The key difference is in how the curved lines are arranged. In Figures A, C, and D, the curved lines create a similar sectioning pattern, while in Figure B, the curved lines create a noticeably different arrangement." + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 114, + 643, + 143, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 643, + 143, + 650 + ], + "spans": [ + { + "bbox": [ + 114, + 643, + 143, + 650 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 113, + 654, + 232, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 654, + 232, + 663 + ], + "spans": [ + { + "bbox": [ + 113, + 654, + 232, + 663 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 113, + 664, + 496, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 664, + 496, + 712 + ], + "spans": [ + { + "bbox": [ + 113, + 664, + 496, + 712 + ], + "type": "text", + "content": "Error Reason: The model makes a reasoning error by overemphasizing minor variations in the curve overlap while overlooking the more fundamental structural difference. Figure D is the only one where the black dot and curves are positioned differently, breaking the symmetry seen in A, B, and C. The model incorrectly identifies B as the outlier due to superficial curve complexity rather than recognizing the true deviation in spatial structure." 
+ } + ] + } + ], + "index": 74 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 76 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 263, + 89, + 341, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 89, + 341, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 89, + 341, + 102 + ], + "type": "text", + "content": "Inductive (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "afd0faec9feb947504d44ca25613ade5faa37fee1a4f3a557b8ced9f2809e7ed.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 108, + 253, + 166 + ], + "blocks": [ + { + "bbox": [ + 173, + 108, + 253, + 166 + ], + "lines": [ + { + "bbox": [ + 173, + 108, + 253, + 166 + ], + "spans": [ + { + "bbox": [ + 173, + 108, + 253, + 166 + ], + "type": "image", + "image_path": "620f6d56098b109afe8ae4b63539ad8f4f4d3e720dff2c685ff3ad62e3d73bee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 171, + 176, + 178 + ], + "lines": [ + { + "bbox": [ + 121, + 171, + 176, + 178 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 176, + 178 + ], + "type": "text", + "content": "A. 3.5.6.7" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 209, + 171, + 261, + 178 + ], + "lines": [ + { + "bbox": [ + 209, + 171, + 261, + 178 + ], + "spans": [ + { + "bbox": [ + 209, + 171, + 261, + 178 + ], + "type": "text", + "content": "B." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 121, + 185, + 176, + 193 + ], + "lines": [ + { + "bbox": [ + 121, + 185, + 176, + 193 + ], + "spans": [ + { + "bbox": [ + 121, + 185, + 176, + 193 + ], + "type": "text", + "content": "C." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 209, + 185, + 263, + 192 + ], + "lines": [ + { + "bbox": [ + 209, + 185, + 263, + 192 + ], + "spans": [ + { + "bbox": [ + 209, + 185, + 263, + 192 + ], + "type": "text", + "content": "D." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 140, + 723, + 468, + 736 + ], + "lines": [ + { + "bbox": [ + 140, + 723, + 468, + 736 + ], + "spans": [ + { + "bbox": [ + 140, + 723, + 468, + 736 + ], + "type": "text", + "content": "Figure 33: A sample correct case of Inductive Reasoning (difficulty: Easy)." 
+ } + ] + } + ], + "index": 77, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 272, + 125, + 488, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 125, + 488, + 146 + ], + "spans": [ + { + "bbox": [ + 272, + 125, + 488, + 146 + ], + "type": "text", + "content": "Question: Analyze the pattern of changes in the figure below, and choose the appropriate fifth row:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 273, + 152, + 342, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 273, + 152, + 342, + 163 + ], + "spans": [ + { + "bbox": [ + 273, + 152, + 342, + 163 + ], + "type": "text", + "content": "Ground Truth: (B)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 198, + 496, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 198, + 496, + 212 + ], + "spans": [ + { + "bbox": [ + 113, + 198, + 496, + 212 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in this figure carefully. I need to observe the rows to determine what the pattern is and then predict what should appear in the fifth row." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 217, + 170, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 217, + 170, + 223 + ], + "spans": [ + { + "bbox": [ + 114, + 217, + 170, + 223 + ], + "type": "text", + "content": "Row 1:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 224, + 170, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 224, + 170, + 229 + ], + "spans": [ + { + "bbox": [ + 114, + 224, + 170, + 229 + ], + "type": "text", + "content": "Row 2:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 229, + 170, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 229, + 170, + 235 + ], + "spans": [ + { + "bbox": [ + 114, + 229, + 170, + 235 + ], + "type": "text", + "content": "Row 3:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 235, + 170, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 235, + 170, + 241 + ], + "spans": [ + { + "bbox": [ + 114, + 235, + 170, + 241 + ], + "type": "text", + "content": "Row 4:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 247, + 240, + 274 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 114, + 247, + 198, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 247, + 198, + 254 + ], + "spans": [ + { + "bbox": [ + 114, + 247, + 198, + 254 + ], + "type": "text", + "content": "Looking at this pattern, I notice that:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 255, + 173, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 255, + 173, + 261 + ], + "spans": [ + { + "bbox": [ + 114, + 255, + 173, + 261 + ], + "type": "text", + "content": "- Each row has 7 symbols" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 261, + 240, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 261, + 240, + 267 + ], + "spans": [ + { + "bbox": [ + 114, + 261, + 240, + 267 + ], + "type": "text", + "content": "Each row contains a mix of hearts " + }, + { + "bbox": [ + 114, + 261, + 240, + 267 + ], + "type": "inline_equation", + "content": "(\\bigcirc)" + }, + { + "bbox": [ + 114, + 261, + 240, + 267 + ], + "type": "text", + "content": " and flowers " + }, + { + "bbox": [ + 
114, + 261, + 240, + 267 + ], + "type": "inline_equation", + "content": "(\\bullet)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 267, + 199, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 267, + 199, + 274 + ], + "spans": [ + { + "bbox": [ + 114, + 267, + 199, + 274 + ], + "type": "text", + "content": "- There seems to be a shifting pattern" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 278, + 230, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 278, + 230, + 285 + ], + "spans": [ + { + "bbox": [ + 114, + 278, + 230, + 285 + ], + "type": "text", + "content": "I notice that if I track the position of the hearts " + }, + { + "bbox": [ + 114, + 278, + 230, + 285 + ], + "type": "inline_equation", + "content": "(\\bigcirc)" + }, + { + "bbox": [ + 114, + 278, + 230, + 285 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 286, + 170, + 309 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 114, + 286, + 170, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 286, + 170, + 292 + ], + "spans": [ + { + "bbox": [ + 114, + 286, + 170, + 292 + ], + "type": "text", + "content": "-Row 1:positions 1,3,4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 292, + 170, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 292, + 170, + 297 + ], + "spans": [ + { + "bbox": [ + 114, + 292, + 170, + 297 + ], + "type": "text", + "content": "-Row 2:positions 2,4,5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 298, + 170, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 298, + 170, + 304 + ], + "spans": [ + { + "bbox": [ + 114, + 298, + 170, + 304 + ], + "type": "text", + "content": "-Row 3:positions 3,5,6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 304, + 170, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 304, + 170, + 309 + ], + "spans": [ + { + "bbox": [ + 114, + 304, + 170, + 309 + ], + "type": "text", + "content": "-Row 4:positions 4,6,7" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 316, + 495, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 316, + 495, + 329 + ], + "spans": [ + { + "bbox": [ + 113, + 316, + 495, + 329 + ], + "type": "text", + "content": "It looks like the hearts are shifting one position to the right in each row. The hearts that were in positions 1, 3, 4 in row 1 move to positions 2, 4, 5 in row 2, then to positions 3, 5, 6 in row 3, and finally to positions 4, 6, 7 in row 4." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 334, + 407, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 334, + 407, + 342 + ], + "spans": [ + { + "bbox": [ + 113, + 334, + 407, + 342 + ], + "type": "text", + "content": "Following this pattern, in row 5, the hearts should be in positions 5, 7, and there should be one that \"wraps around\" to position 1." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 346, + 214, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 346, + 214, + 353 + ], + "spans": [ + { + "bbox": [ + 113, + 346, + 214, + 353 + ], + "type": "text", + "content": "So the fifth row should be: " + }, + { + "bbox": [ + 113, + 346, + 214, + 353 + ], + "type": "inline_equation", + "content": "\\bigcirc \\bigcirc \\bigcirc \\bigcirc \\bigcirc \\bigcirc" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 358, + 201, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 358, + 201, + 365 + ], + "spans": [ + { + "bbox": [ + 114, + 358, + 201, + 365 + ], + "type": "text", + "content": "Now let me check the answer choices:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 365, + 226, + 389 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 114, + 365, + 206, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 365, + 206, + 371 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 206, + 371 + ], + "type": "text", + "content": "A. " + }, + { + "bbox": [ + 114, + 365, + 206, + 371 + ], + "type": "inline_equation", + "content": "\\bullet \\bullet \\bullet \\bullet \\bullet \\bullet \\bullet" + }, + { + "bbox": [ + 114, + 365, + 206, + 371 + ], + "type": "text", + "content": " - This doesn't match" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 372, + 226, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 372, + 226, + 377 + ], + "spans": [ + { + "bbox": [ + 114, + 372, + 226, + 377 + ], + "type": "text", + "content": "B.♥ - This matches my prediction" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 377, + 206, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 377, + 206, + 383 + ], + "spans": [ + { + "bbox": [ + 114, + 377, + 206, + 383 + ], + "type": "text", + "content": "C. " + }, + { + "bbox": [ + 114, + 377, + 206, + 383 + ], + "type": "inline_equation", + "content": "\\because \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\quad \\square \\cdot" + }, + { + "bbox": [ + 114, + 377, + 206, + 383 + ], + "type": "text", + "content": " This doesn't match" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 384, + 206, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 384, + 206, + 389 + ], + "spans": [ + { + "bbox": [ + 114, + 384, + 206, + 389 + ], + "type": "text", + "content": "D. " + }, + { + "bbox": [ + 114, + 384, + 206, + 389 + ], + "type": "inline_equation", + "content": "\\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar \\bigstar -" + }, + { + "bbox": [ + 114, + 384, + 206, + 389 + ], + "type": "text", + "content": " This doesn't match" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 396, + 176, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 396, + 176, + 402 + ], + "spans": [ + { + "bbox": [ + 114, + 396, + 176, + 402 + ], + "type": "text", + "content": "So the answer should be B." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 407, + 280, + 439 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 114, + 407, + 280, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 407, + 280, + 415 + ], + "spans": [ + { + "bbox": [ + 114, + 407, + 280, + 415 + ], + "type": "text", + "content": "Let me double-check by looking at the pattern of the flowers " + }, + { + "bbox": [ + 114, + 407, + 280, + 415 + ], + "type": "inline_equation", + "content": "(\\star)" + }, + { + "bbox": [ + 114, + 407, + 280, + 415 + ], + "type": "text", + "content": " as well:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 415, + 176, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 415, + 176, + 421 + ], + "spans": [ + { + "bbox": [ + 114, + 415, + 176, + 421 + ], + "type": "text", + "content": "-Row 1:positions2,5,6,7" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 421, + 176, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 421, + 176, + 426 + ], + "spans": [ + { + "bbox": [ + 114, + 421, + 176, + 426 + ], + "type": "text", + "content": "-Row 2:positions1,3,6,7" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 427, + 176, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 427, + 176, + 433 + ], + "spans": [ + { + "bbox": [ + 114, + 427, + 176, + 433 + ], + "type": "text", + "content": "-Row 3:positions1,2,4,7" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 434, + 176, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 434, + 176, + 439 + ], + "spans": [ + { + "bbox": [ + 114, + 434, + 176, + 439 + ], + "type": "text", + "content": "-Row 4:positions1,2,3,5" + } + ] + } + ], + "index": 40 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 445, + 345, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 445, + 345, + 452 + ], + "spans": [ + { + "bbox": [ + 114, + 445, + 345, + 452 + ], + "type": "text", + "content": "The flowers are in all positions where hearts are not. So in row 5, they should be in positions 2, 3, 4, 6." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 452, + 179, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 452, + 179, + 457 + ], + "spans": [ + { + "bbox": [ + 114, + 452, + 179, + 457 + ], + "type": "text", + "content": "That gives: " + }, + { + "bbox": [ + 114, + 452, + 179, + 457 + ], + "type": "inline_equation", + "content": "\\text{心}" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 457, + 242, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 457, + 242, + 464 + ], + "spans": [ + { + "bbox": [ + 114, + 457, + 242, + 464 + ], + "type": "text", + "content": "This confirms that option B (♥ ♀ ♀ ♀ ♀ ♀) is correct." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 464, + 151, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 464, + 151, + 470 + ], + "spans": [ + { + "bbox": [ + 114, + 464, + 151, + 470 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 470, + 200, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 470, + 200, + 476 + ], + "spans": [ + { + "bbox": [ + 114, + 470, + 200, + 476 + ], + "type": "text", + "content": "Analyzing the Pattern in the Figure" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 482, + 328, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 482, + 328, + 489 + ], + "spans": [ + { + "bbox": [ + 114, + 482, + 328, + 489 + ], + "type": "text", + "content": "I need to identify the pattern of changes across rows and predict what the fifth row should be." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 495, + 183, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 495, + 183, + 500 + ], + "spans": [ + { + "bbox": [ + 114, + 495, + 183, + 500 + ], + "type": "text", + "content": "## Observation of the Pattern" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 506, + 192, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 506, + 192, + 513 + ], + "spans": [ + { + "bbox": [ + 114, + 506, + 192, + 513 + ], + "type": "text", + "content": "Looking at the existing four rows:" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 514, + 172, + 536 + ], + "type": "list", + "angle": 0, + "index": 54, + "blocks": [ + { + "bbox": [ + 114, + 514, + 172, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 172, + 518 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 172, + 518 + ], + "type": "text", + "content": "-Row1:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 519, + 172, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 519, + 172, + 524 + ], + "spans": [ + { + "bbox": [ + 114, + 519, + 172, + 524 + ], + "type": "text", + "content": "-Row2:" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 525, + 172, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 525, + 172, + 529 + ], + "spans": [ + { + "bbox": [ + 114, + 525, + 172, + 529 + ], + "type": "text", + "content": "-Row 3:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 530, + 172, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 530, + 172, + 536 + ], + "spans": [ + { + "bbox": [ + 114, + 530, + 172, + 536 + ], + "type": "text", + "content": "-Row 4:" + } + ] + } + ], + "index": 53 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 544, + 160, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 544, + 160, + 550 + ], + "spans": [ + { + "bbox": [ + 114, + 544, + 160, + 550 + ], + "type": "text", + "content": "## Pattern Analysis" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 556, + 267, + 587 + ], + "type": "list", + "angle": 0, + "index": 61, + "blocks": [ + { + "bbox": [ + 114, + 556, + 267, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 556, + 267, + 563 + ], + "spans": [ + { + "bbox": [ + 114, + 556, + 267, + 563 + ], + "type": "text", + "content": "I notice that the position of the hearts " + }, + { + "bbox": [ + 114, + 556, 
+ 267, + 563 + ], + "type": "inline_equation", + "content": "(\\hat{\\mathbb{V}})" + }, + { + "bbox": [ + 114, + 556, + 267, + 563 + ], + "type": "text", + "content": " shifts in a consistent way:" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 563, + 192, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 563, + 192, + 569 + ], + "spans": [ + { + "bbox": [ + 114, + 563, + 192, + 569 + ], + "type": "text", + "content": "-Row 1: Hearts in positions 1,3,4" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 569, + 192, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 569, + 192, + 574 + ], + "spans": [ + { + "bbox": [ + 114, + 569, + 192, + 574 + ], + "type": "text", + "content": "-Row 2: Hearts in positions 2,4,5" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 575, + 192, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 575, + 192, + 581 + ], + "spans": [ + { + "bbox": [ + 114, + 575, + 192, + 581 + ], + "type": "text", + "content": "-Row 3: Hearts in positions 3,5,6" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 581, + 192, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 581, + 192, + 587 + ], + "spans": [ + { + "bbox": [ + 114, + 581, + 192, + 587 + ], + "type": "text", + "content": "-Row 4: Hearts in positions 4,6,7" + } + ] + } + ], + "index": 60 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 593, + 371, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 593, + 371, + 600 + ], + "spans": [ + { + "bbox": [ + 113, + 593, + 371, + 600 + ], + "type": "text", + "content": "The hearts are shifting one position to the right in each row, with a \"wraparound\" effect when they reach the end." 
+ } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 605, + 179, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 605, + 179, + 612 + ], + "spans": [ + { + "bbox": [ + 114, + 605, + 179, + 612 + ], + "type": "text", + "content": "Predicting the Fifth Row" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 617, + 264, + 635 + ], + "type": "list", + "angle": 0, + "index": 67, + "blocks": [ + { + "bbox": [ + 114, + 617, + 264, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 617, + 264, + 624 + ], + "spans": [ + { + "bbox": [ + 114, + 617, + 264, + 624 + ], + "type": "text", + "content": "Following this pattern, in row 5, the hearts should be in positions:" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 624, + 157, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 624, + 157, + 629 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 157, + 629 + ], + "type": "text", + "content": "-5 shifted from 4)" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 630, + 157, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 630, + 157, + 635 + ], + "spans": [ + { + "bbox": [ + 114, + 630, + 157, + 635 + ], + "type": "text", + "content": "-7 (shifted from 6)" + } + ] + } + ], + "index": 66 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 637, + 198, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 637, + 198, + 643 + ], + "spans": [ + { + "bbox": [ + 114, + 637, + 198, + 643 + ], + "type": "text", + "content": "-1 (shifted from 7, wrapped around)" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 114, + 648, + 214, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 648, + 214, + 654 + ], + "spans": [ + { + "bbox": [ + 114, + 648, + 214, + 654 + ], + "type": "text", + "content": "So the fifth row should be:" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 114, + 661, + 172, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 661, + 172, + 667 + ], + "spans": [ + { + "bbox": [ + 114, + 661, + 172, + 667 + ], + "type": "text", + "content": "## Checking the Options" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 114, + 673, + 226, + 697 + ], + "type": "list", + "angle": 0, + "index": 75, + "blocks": [ + { + "bbox": [ + 114, + 673, + 192, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 673, + 192, + 679 + ], + "spans": [ + { + "bbox": [ + 114, + 673, + 192, + 679 + ], + "type": "text", + "content": "A. 
" + }, + { + "bbox": [ + 114, + 673, + 192, + 679 + ], + "type": "inline_equation", + "content": "\\bullet \\bullet \\bullet \\bullet \\bullet \\bullet \\bullet -\\mathrm{Not}" + }, + { + "bbox": [ + 114, + 673, + 192, + 679 + ], + "type": "text", + "content": " a match" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 114, + 679, + 226, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 679, + 226, + 685 + ], + "spans": [ + { + "bbox": [ + 114, + 679, + 226, + 685 + ], + "type": "text", + "content": "B.♥ -This matches my prediction" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 114, + 686, + 192, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 686, + 192, + 692 + ], + "spans": [ + { + "bbox": [ + 114, + 686, + 192, + 692 + ], + "type": "text", + "content": "C.♥ -Not a match" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 114, + 692, + 192, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 692, + 192, + 697 + ], + "spans": [ + { + "bbox": [ + 114, + 692, + 192, + 697 + ], + "type": "text", + "content": "D. \\(\\text{串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串串" + } + ] + } + ], + "index": 74 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 704, + 139, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 704, + 139, + 710 + ], + "spans": [ + { + "bbox": [ + 114, + 704, + 139, + 710 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 76 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 78 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 254, + 89, + 351, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 89, + 351, + 102 + ], + "spans": [ + { + "bbox": [ + 254, + 89, + 351, + 102 + ], + "type": "text", + "content": "Inductive (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "88905e754aef245617dad71ab60cf29cbaa7de0c2cdc7bfa6246797edc486a4a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 125, + 111, + 276, + 145 + ], + "blocks": [ + { + "bbox": [ + 125, + 111, + 276, + 145 + ], + "lines": [ + { + "bbox": [ + 125, + 111, + 276, + 145 + ], + "spans": [ + { + "bbox": [ + 125, + 111, + 276, + 145 + ], + "type": "image", + "image_path": "9b0cd4b7d45ea96e77e1a76fa635f68d59efca6ae415ddcea6116702d2a08391.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 136, + 723, + 473, + 736 + ], + "lines": [ + { + "bbox": [ + 136, + 723, + 473, + 736 + ], + 
"spans": [ + { + "bbox": [ + 136, + 723, + 473, + 736 + ], + "type": "text", + "content": "Figure 34: A sample error case of Inductive Reasoning (difficulty: Medium)." + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 140, + 149, + 259, + 182 + ], + "blocks": [ + { + "bbox": [ + 140, + 149, + 259, + 182 + ], + "lines": [ + { + "bbox": [ + 140, + 149, + 259, + 182 + ], + "spans": [ + { + "bbox": [ + 140, + 149, + 259, + 182 + ], + "type": "image", + "image_path": "f43ef894cc362d7c0b9717dfbc06a4a9667abb2d5aff08d0cb8cfc765e949aa3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 153, + 186, + 160, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 186, + 160, + 191 + ], + "spans": [ + { + "bbox": [ + 153, + 186, + 160, + 191 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 183, + 186, + 190, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 186, + 190, + 191 + ], + "spans": [ + { + "bbox": [ + 183, + 186, + 190, + 191 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 211, + 186, + 216, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 186, + 216, + 191 + ], + "spans": [ + { + "bbox": [ + 211, + 186, + 216, + 191 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 244, + 186, + 247, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 186, + 247, + 191 + ], + "spans": [ + { + "bbox": [ + 244, + 186, + 247, + 191 + ], + "type": "text", + "content": "D" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 288, + 125, + 490, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 125, + 490, + 156 + ], + "spans": [ + { + "bbox": [ + 288, + 125, + 490, + 156 + ], + "type": "text", + "content": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 288, + 167, + 358, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 167, + 358, + 178 + ], + "spans": [ + { + "bbox": [ + 288, + 167, + 358, + 178 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 198, + 497, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 198, + 497, + 214 + ], + "spans": [ + { + "bbox": [ + 113, + 198, + 497, + 214 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze the pattern in the given sequence of figures and find what should come in place of the question mark." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 219, + 441, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 219, + 441, + 228 + ], + "spans": [ + { + "bbox": [ + 113, + 219, + 441, + 228 + ], + "type": "text", + "content": "The sequence shows hexagons with internal lines. Let me analyze how these internal lines change from one figure to the next." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 113, + 233, + 399, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 233, + 399, + 243 + ], + "spans": [ + { + "bbox": [ + 113, + 233, + 399, + 243 + ], + "type": "text", + "content": "First figure: A hexagon with a single line from the top vertex going down to about the middle of the hexagon." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 247, + 404, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 247, + 404, + 256 + ], + "spans": [ + { + "bbox": [ + 113, + 247, + 404, + 256 + ], + "type": "text", + "content": "Second figure: A hexagon with a single line from the upper-right vertex going to the middle-left of the hexagon." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 261, + 400, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 261, + 400, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 261, + 400, + 270 + ], + "type": "text", + "content": "Third figure: A hexagon with a single line from the bottom-right vertex going to the upper-left of the hexagon." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 275, + 403, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 275, + 403, + 285 + ], + "spans": [ + { + "bbox": [ + 113, + 275, + 403, + 285 + ], + "type": "text", + "content": "Fourth figure: A hexagon with a single line from the bottom-left vertex going to the upper-right of the hexagon." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 112, + 289, + 496, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 289, + 496, + 306 + ], + "spans": [ + { + "bbox": [ + 112, + 289, + 496, + 306 + ], + "type": "text", + "content": "So the pattern seems to be that the line rotates in a clockwise direction around the hexagon. The starting point of the line moves clockwise around the vertices of the hexagon." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 112, + 311, + 496, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 311, + 496, + 327 + ], + "spans": [ + { + "bbox": [ + 112, + 311, + 496, + 327 + ], + "type": "text", + "content": "Given this pattern, the fifth figure (the one with the question mark) should have a line from the lower vertex (at the bottom) going up to the upper-right of the hexagon." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 332, + 189, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 332, + 189, + 340 + ], + "spans": [ + { + "bbox": [ + 113, + 332, + 189, + 340 + ], + "type": "text", + "content": "Now let's check the options:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 340, + 316, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 340, + 316, + 347 + ], + "spans": [ + { + "bbox": [ + 113, + 340, + 316, + 347 + ], + "type": "text", + "content": "Option A: A hexagon with a line from the left vertex going to the right vertex." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 347, + 331, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 347, + 331, + 354 + ], + "spans": [ + { + "bbox": [ + 113, + 347, + 331, + 354 + ], + "type": "text", + "content": "Option B: A hexagon with a line from the upper-left vertex going to the lower-right." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 354, + 318, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 354, + 318, + 361 + ], + "spans": [ + { + "bbox": [ + 113, + 354, + 318, + 361 + ], + "type": "text", + "content": "Option C: A hexagon with a line from the top vertex going to the bottom-right." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 361, + 304, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 361, + 304, + 368 + ], + "spans": [ + { + "bbox": [ + 113, + 361, + 304, + 368 + ], + "type": "text", + "content": "Option D: A hexagon with a line from the top vertex going to the bottom." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 374, + 495, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 374, + 495, + 390 + ], + "spans": [ + { + "bbox": [ + 113, + 374, + 495, + 390 + ], + "type": "text", + "content": "Looking at the pattern and the next expected figure, option A seems closest to what I'm expecting - a line going from the left side to the right side. Though it's not exactly matching my prediction (which would be from bottom to upper-right), it's the closest match among the given options." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 396, + 496, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 396, + 496, + 411 + ], + "spans": [ + { + "bbox": [ + 113, + 396, + 496, + 411 + ], + "type": "text", + "content": "OK, I think I've been struggling because I don't have a completely clear view of the images. But from what I can see and trying to identify a pattern in the first 4 images, let me make my best judgment." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 416, + 496, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 416, + 496, + 432 + ], + "spans": [ + { + "bbox": [ + 113, + 416, + 496, + 432 + ], + "type": "text", + "content": "The line inside the hexagon appears to be moving in some pattern as we go from one figure to the next. Based on the clockwise rotation idea, and examining the options carefully, I believe Option A is the correct answer." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 432, + 157, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 432, + 157, + 438 + ], + "spans": [ + { + "bbox": [ + 113, + 432, + 157, + 438 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 438, + 228, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 438, + 228, + 445 + ], + "spans": [ + { + "bbox": [ + 113, + 438, + 228, + 445 + ], + "type": "text", + "content": "Analysis of Pattern in Hexagon Sequence" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 452, + 422, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 452, + 422, + 460 + ], + "spans": [ + { + "bbox": [ + 113, + 452, + 422, + 460 + ], + "type": "text", + "content": "I need to identify the pattern in the sequence of figures and determine which option should replace the question mark." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 466, + 182, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 466, + 182, + 474 + ], + "spans": [ + { + "bbox": [ + 113, + 466, + 182, + 474 + ], + "type": "text", + "content": "Examining the Pattern" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 479, + 251, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 479, + 251, + 488 + ], + "spans": [ + { + "bbox": [ + 113, + 479, + 251, + 488 + ], + "type": "text", + "content": "Let me analyze the first four figures in the sequence:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 494, + 331, + 523 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 113, + 494, + 320, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 494, + 320, + 502 + ], + "spans": [ + { + "bbox": [ + 113, + 494, + 320, + 502 + ], + "type": "text", + "content": "1. First figure: A hexagon with a line from the top vertex to the bottom-left side" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 502, + 331, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 502, + 331, + 509 + ], + "spans": [ + { + "bbox": [ + 113, + 502, + 331, + 509 + ], + "type": "text", + "content": "2. Second figure: A hexagon with a line from the top-right vertex to the bottom side" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 509, + 316, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 509, + 316, + 516 + ], + "spans": [ + { + "bbox": [ + 113, + 509, + 316, + 516 + ], + "type": "text", + "content": "3. Third figure: A hexagon with a line from the right vertex to the top-left side" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 113, + 516, + 329, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 516, + 329, + 523 + ], + "spans": [ + { + "bbox": [ + 113, + 516, + 329, + 523 + ], + "type": "text", + "content": "4. Fourth figure: A hexagon with a line from the bottom-right vertex to the left side" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "spans": [ + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "text", + "content": "Looking at this sequence, I can see that the starting point of the line moves clockwise around the hexagon (from top " + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "text", + "content": " top-right " + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "text", + "content": " right " + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 529, + 496, + 545 + ], + "type": "text", + "content": " bottom-right). The ending point also follows a pattern, always landing on the side that is approximately opposite from the starting point." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 550, + 193, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 550, + 193, + 559 + ], + "spans": [ + { + "bbox": [ + 113, + 550, + 193, + 559 + ], + "type": "text", + "content": "Predicting the Next Figure" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 564, + 496, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 564, + 496, + 581 + ], + "spans": [ + { + "bbox": [ + 113, + 564, + 496, + 581 + ], + "type": "text", + "content": "Following this pattern, the fifth figure should have a line starting from the bottom-left vertex (continuing the clockwise movement of the starting point). The line should end at the top or top-right side." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 113, + 586, + 185, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 586, + 185, + 594 + ], + "spans": [ + { + "bbox": [ + 113, + 586, + 185, + 594 + ], + "type": "text", + "content": "Evaluating the Options" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 600, + 201, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 600, + 201, + 607 + ], + "spans": [ + { + "bbox": [ + 113, + 600, + 201, + 607 + ], + "type": "text", + "content": "Looking at the provided options:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 607, + 329, + 635 + ], + "type": "list", + "angle": 0, + "index": 46, + "blocks": [ + { + "bbox": [ + 113, + 607, + 296, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 607, + 296, + 614 + ], + "spans": [ + { + "bbox": [ + 113, + 607, + 296, + 614 + ], + "type": "text", + "content": "- Option A: A hexagon with a line from the left vertex to the right side" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 614, + 329, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 614, + 329, + 621 + ], + "spans": [ + { + "bbox": [ + 113, + 614, + 329, + 621 + ], + "type": "text", + "content": "- Option B: A hexagon with a line from the upper-left vertex to the lower-right side" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 621, + 313, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 621, + 313, + 628 + ], + "spans": [ + { + "bbox": [ + 113, + 621, + 313, + 628 + ], + "type": "text", + "content": "- Option C: A hexagon with a line from the upper-left vertex to the right side" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 113, + 628, + 297, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 628, + 297, + 635 + ], + "spans": [ + { + "bbox": [ + 113, + 628, + 297, + 635 + ], + "type": "text", + "content": "- Option D: A hexagon with a line from the top vertex to the right side" + } + ] + } + ], + "index": 45 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 641, + 496, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 641, + 496, + 658 + ], + "spans": [ + { + "bbox": [ + 113, + 641, + 496, + 658 + ], + "type": "text", + "content": "Option A is the only one that features a line starting from the left vertex, which is the closest to the bottom-left vertex that should be next in our pattern. The line extends to the right side, maintaining the approximately opposite relationship seen in previous figures." 
+ } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 113, + 663, + 143, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 663, + 143, + 670 + ], + "spans": [ + { + "bbox": [ + 113, + 663, + 143, + 670 + ], + "type": "text", + "content": "Answer: A" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 113, + 673, + 232, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 232, + 683 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 232, + 683 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 113, + 683, + 496, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 683, + 496, + 713 + ], + "spans": [ + { + "bbox": [ + 113, + 683, + 496, + 713 + ], + "type": "text", + "content": "Error Reason: The model correctly observes a clockwise movement but incorrectly predicts the direction and placement of the next line, choosing option A. The model fails to identify the angle of the movement pattern." + } + ] + } + ], + "index": 50 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 52 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 254, + 89, + 351, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 89, + 351, + 102 + ], + "spans": [ + { + "bbox": [ + 254, + 89, + 351, + 102 + ], + "type": "text", + "content": "Inductive (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "739f5d92cd82927a2d7bdf4d2ab2309c4bc04f1b4fc7ed3709e892e83ee42b4e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 152, + 103, + 181, + 132 + ], + "blocks": [ + { + "bbox": [ + 152, + 103, + 181, + 132 + ], + "lines": [ + { + "bbox": [ + 152, + 103, + 181, + 132 + ], + "spans": [ + { + "bbox": [ + 152, + 103, + 181, + 132 + ], + "type": "image", + "image_path": "dabc692ef1c0c1082feaaf1e2462b37ee282600a2506e8c89411d6c3df9a1439.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 132, + 170, + 137 + ], + "lines": [ + { + "bbox": [ + 163, + 132, + 170, + 137 + ], + "spans": [ + { + "bbox": [ + 163, + 132, + 170, + 137 + ], + "type": "text", + "content": "①" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 184, + 105, + 211, + 130 + ], + "blocks": [ + { + "bbox": [ + 184, + 105, + 211, + 130 + ], + "lines": [ + { + "bbox": [ + 184, + 105, + 211, + 130 + ], + "spans": [ + { + "bbox": [ + 184, + 105, + 211, + 130 + ], + "type": "image", + "image_path": 
"833d68dbfe0935e410c0edebe6b9a388a975d4f0df03af384d4597f191c08d49.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 132, + 200, + 137 + ], + "lines": [ + { + "bbox": [ + 194, + 132, + 200, + 137 + ], + "spans": [ + { + "bbox": [ + 194, + 132, + 200, + 137 + ], + "type": "text", + "content": "②" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 215, + 105, + 242, + 131 + ], + "blocks": [ + { + "bbox": [ + 215, + 105, + 242, + 131 + ], + "lines": [ + { + "bbox": [ + 215, + 105, + 242, + 131 + ], + "spans": [ + { + "bbox": [ + 215, + 105, + 242, + 131 + ], + "type": "image", + "image_path": "61d4e7983c5dbd66a1aaf55dd7c03249b7db7e007c72d194c2194e20780a9fa2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 132, + 219, + 137 + ], + "lines": [ + { + "bbox": [ + 214, + 132, + 219, + 137 + ], + "spans": [ + { + "bbox": [ + 214, + 132, + 219, + 137 + ], + "type": "inline_equation", + "content": "\\therefore m = \\frac{3}{11}" + }, + { + "bbox": [ + 214, + 132, + 219, + 137 + ], + "type": "text", + "content": " ;" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 250, + 126, + 489, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 126, + 489, + 148 + ], + "spans": [ + { + "bbox": [ + 250, + 126, + 489, + 148 + ], + "type": "text", + "content": "Question: Divide the following six figures into two categories, so that each category displays its own pattern." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 251, + 156, + 321, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 156, + 321, + 167 + ], + "spans": [ + { + "bbox": [ + 251, + 156, + 321, + 167 + ], + "type": "text", + "content": "Ground Truth: (D)" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 152, + 140, + 181, + 167 + ], + "blocks": [ + { + "bbox": [ + 152, + 140, + 181, + 167 + ], + "lines": [ + { + "bbox": [ + 152, + 140, + 181, + 167 + ], + "spans": [ + { + "bbox": [ + 152, + 140, + 181, + 167 + ], + "type": "image", + "image_path": "ad7496557f0542675336a310ca2059dadf3a471b7b68213fd05a31105358d979.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 168, + 170, + 174 + ], + "lines": [ + { + "bbox": [ + 163, + 168, + 170, + 174 + ], + "spans": [ + { + "bbox": [ + 163, + 168, + 170, + 174 + ], + "type": "inline_equation", + "content": "④" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 723, + 477, + 736 + ], + "lines": [ + { + "bbox": [ + 132, + 723, + 477, + 736 + ], + "spans": [ + { + "bbox": [ + 132, + 723, + 477, + 736 + ], + "type": "text", + "content": "Figure 35: A sample correct case of Inductive Reasoning (difficulty: Medium)." 
+ } + ] + } + ], + "index": 94, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 184, + 141, + 211, + 167 + ], + "blocks": [ + { + "bbox": [ + 184, + 141, + 211, + 167 + ], + "lines": [ + { + "bbox": [ + 184, + 141, + 211, + 167 + ], + "spans": [ + { + "bbox": [ + 184, + 141, + 211, + 167 + ], + "type": "image", + "image_path": "34f92a4aa521689cf2d90b4f881ab0bffe6490c57e02a38f4cb90bd5f6129190.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 168, + 201, + 174 + ], + "lines": [ + { + "bbox": [ + 194, + 168, + 201, + 174 + ], + "spans": [ + { + "bbox": [ + 194, + 168, + 201, + 174 + ], + "type": "text", + "content": "(5)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 215, + 141, + 242, + 167 + ], + "blocks": [ + { + "bbox": [ + 215, + 141, + 242, + 167 + ], + "lines": [ + { + "bbox": [ + 215, + 141, + 242, + 167 + ], + "spans": [ + { + "bbox": [ + 215, + 141, + 242, + 167 + ], + "type": "image", + "image_path": "d96d394318067b904a712079a7165288a420ae879a0d88f7529f2c1c573e3d0f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 168, + 219, + 174 + ], + "lines": [ + { + "bbox": [ + 214, + 168, + 219, + 174 + ], + "spans": [ + { + "bbox": [ + 214, + 168, + 219, + 174 + ], + "type": "inline_equation", + "content": "\\therefore m = \\frac{3}{11}" + }, + { + "bbox": [ + 214, + 168, + 219, + 174 + ], + "type": "text", + "content": " ;" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 195, + 179, + 238, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 179, + 238, + 186 + ], + "spans": [ + { + "bbox": [ + 195, + 179, + 238, + 186 + ], + "type": "text", + "content": "B. ①④⑥, ②③⑤" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 195, + 189, + 238, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 189, + 238, + 194 + ], + "spans": [ + { + "bbox": [ + 195, + 189, + 238, + 194 + ], + "type": "text", + "content": "D. ①②④, ③⑤⑥" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 178, + 157, + 194 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 113, + 178, + 157, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 178, + 157, + 186 + ], + "spans": [ + { + "bbox": [ + 113, + 178, + 157, + 186 + ], + "type": "text", + "content": "A. ①③⑤. ②④⑥" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 189, + 156, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 189, + 156, + 194 + ], + "spans": [ + { + "bbox": [ + 114, + 189, + 156, + 194 + ], + "type": "text", + "content": "C.①③④. ②⑤⑥" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 201, + 496, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 201, + 496, + 209 + ], + "spans": [ + { + "bbox": [ + 113, + 201, + 496, + 209 + ], + "type": "text", + "content": "Clae-3.7-Sonnet-Thinking: I need to analyze the six figures and find a pattern that divides them into two distinct categories. 
Let me examine each figure:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 213, + 315, + 253 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 113, + 213, + 315, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 213, + 315, + 221 + ], + "spans": [ + { + "bbox": [ + 113, + 213, + 315, + 221 + ], + "type": "text", + "content": "Figure ①: A square with an internal shape that looks like a doorway or page with a fold." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 221, + 296, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 221, + 296, + 228 + ], + "spans": [ + { + "bbox": [ + 114, + 221, + 296, + 228 + ], + "type": "text", + "content": "Figure ②: A square with an internal shape that looks like an L-shape or corner." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 228, + 306, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 228, + 306, + 234 + ], + "spans": [ + { + "bbox": [ + 114, + 228, + 306, + 234 + ], + "type": "text", + "content": "Figure ③: A square with what appears to be a parallelogram with a small arc on top." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 234, + 280, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 234, + 280, + 240 + ], + "spans": [ + { + "bbox": [ + 114, + 234, + 280, + 240 + ], + "type": "text", + "content": "Figure 4: A square with some zigzag or lightning-bolt-like shape inside." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 240, + 257, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 240, + 257, + 247 + ], + "spans": [ + { + "bbox": [ + 114, + 240, + 257, + 247 + ], + "type": "text", + "content": "Figure ⑤: A square with a circle that has an X or cross inside it." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 247, + 289, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 247, + 289, + 253 + ], + "spans": [ + { + "bbox": [ + 114, + 247, + 289, + 253 + ], + "type": "text", + "content": "Figure ⑥: A square with what looks like a stylized figure or hourglass shape." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 257, + 404, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 257, + 404, + 264 + ], + "spans": [ + { + "bbox": [ + 113, + 257, + 404, + 264 + ], + "type": "text", + "content": "Now I need to look for patterns or characteristics that could divide these into two groups. Some possible categorization criteria:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 264, + 192, + 295 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 114, + 264, + 187, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 264, + 187, + 270 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 187, + 270 + ], + "type": "text", + "content": "- Curved vs. straight lines" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 270, + 187, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 270, + 187, + 276 + ], + "spans": [ + { + "bbox": [ + 114, + 270, + 187, + 276 + ], + "type": "text", + "content": "- Symmetrical vs. 
asymmetrical" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 276, + 162, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 276, + 162, + 282 + ], + "spans": [ + { + "bbox": [ + 114, + 276, + 162, + 282 + ], + "type": "text", + "content": "- Simple vs. complex" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 282, + 171, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 282, + 171, + 289 + ], + "spans": [ + { + "bbox": [ + 114, + 282, + 171, + 289 + ], + "type": "text", + "content": "- Open vs. closed shapes" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 289, + 192, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 289, + 192, + 295 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 192, + 295 + ], + "type": "text", + "content": "- Single shape vs. multiple shapes" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 300, + 190, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 300, + 190, + 308 + ], + "spans": [ + { + "bbox": [ + 113, + 300, + 190, + 308 + ], + "type": "text", + "content": "Let me try to apply these criteria:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 312, + 172, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 312, + 172, + 319 + ], + "spans": [ + { + "bbox": [ + 113, + 312, + 172, + 319 + ], + "type": "text", + "content": "Curved vs. straight lines:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 319, + 230, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 319, + 230, + 325 + ], + "spans": [ + { + "bbox": [ + 114, + 319, + 230, + 325 + ], + "type": "text", + "content": "- Figures with prominent curves: ⑤ (circle with X)" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 325, + 233, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 325, + 233, + 332 + ], + "spans": [ + { + "bbox": [ + 114, + 325, + 233, + 332 + ], + "type": "text", + "content": "Figures with mainly straight lines: ①, ②, ③, ④, ⑥" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 337, + 214, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 337, + 214, + 345 + ], + "spans": [ + { + "bbox": [ + 114, + 337, + 214, + 345 + ], + "type": "text", + "content": "That doesn't divide evenly into two groups." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 350, + 228, + 369 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 113, + 350, + 185, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 350, + 185, + 356 + ], + "spans": [ + { + "bbox": [ + 113, + 350, + 185, + 356 + ], + "type": "text", + "content": "Symmetrical vs. 
asymmetrical:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 356, + 228, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 356, + 228, + 362 + ], + "spans": [ + { + "bbox": [ + 114, + 356, + 228, + 362 + ], + "type": "text", + "content": "- More symmetrical: ⑤ (circle with X), possibly ⑥" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 362, + 222, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 362, + 222, + 369 + ], + "spans": [ + { + "bbox": [ + 114, + 362, + 222, + 369 + ], + "type": "text", + "content": "- Less symmetrical/asymmetrical: ①, ②, ③, ④" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 374, + 161, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 374, + 161, + 381 + ], + "spans": [ + { + "bbox": [ + 113, + 374, + 161, + 381 + ], + "type": "text", + "content": "Simple vs. complex:" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 381, + 198, + 393 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 114, + 381, + 168, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 381, + 168, + 387 + ], + "spans": [ + { + "bbox": [ + 114, + 381, + 168, + 387 + ], + "type": "text", + "content": "- Simpler shapes: ②, ⑤" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 387, + 198, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 387, + 198, + 393 + ], + "spans": [ + { + "bbox": [ + 114, + 387, + 198, + 393 + ], + "type": "text", + "content": "- More complex shapes: ①, ③, ④, ⑥" + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 399, + 209, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 399, + 209, + 405 + ], + "spans": [ + { + "bbox": [ + 113, + 399, + 209, + 405 + ], + "type": "text", + "content": "Open vs. 
closed shapes inside the square:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 405, + 185, + 418 + ], + "type": "list", + "angle": 0, + "index": 53, + "blocks": [ + { + "bbox": [ + 114, + 405, + 175, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 405, + 175, + 411 + ], + "spans": [ + { + "bbox": [ + 114, + 405, + 175, + 411 + ], + "type": "text", + "content": "- Closed shapes: ⑤ (circle)" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 411, + 185, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 411, + 185, + 418 + ], + "spans": [ + { + "bbox": [ + 114, + 411, + 185, + 418 + ], + "type": "text", + "content": "- Open shapes: ①, ②, ③, ④, ⑥" + } + ] + } + ], + "index": 52 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 423, + 266, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 423, + 266, + 430 + ], + "spans": [ + { + "bbox": [ + 113, + 423, + 266, + 430 + ], + "type": "text", + "content": "Let me look at whether the internal shapes touch the outer square:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 430, + 244, + 443 + ], + "type": "list", + "angle": 0, + "index": 57, + "blocks": [ + { + "bbox": [ + 114, + 430, + 244, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 430, + 244, + 437 + ], + "spans": [ + { + "bbox": [ + 114, + 430, + 244, + 437 + ], + "type": "text", + "content": "- Touching the outer square: ①, ②, ④, possibly ③ and ⑥" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 437, + 195, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 437, + 195, + 443 + ], + "spans": [ + { + "bbox": [ + 114, + 437, + 195, + 443 + ], + "type": "text", + "content": "- Not touching the outer square: ⑤" + } + ] + } + ], + "index": 56 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 448, + 276, + 467 + ], + "type": "list", + "angle": 0, + "index": 61, + "blocks": [ + { + "bbox": [ + 113, + 448, + 276, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 448, + 276, + 455 + ], + "spans": [ + { + "bbox": [ + 113, + 448, + 276, + 455 + ], + "type": "text", + "content": "Another possibility - shapes with internal divisions vs. 
shapes without:" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 456, + 193, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 456, + 193, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 456, + 193, + 461 + ], + "type": "text", + "content": "- With internal divisions: ①, ④, ⑤" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 461, + 200, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 461, + 200, + 467 + ], + "spans": [ + { + "bbox": [ + 114, + 461, + 200, + 467 + ], + "type": "text", + "content": "- Without internal divisions: ②, ③, ⑥" + } + ] + } + ], + "index": 60 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 472, + 277, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 472, + 277, + 479 + ], + "spans": [ + { + "bbox": [ + 113, + 472, + 277, + 479 + ], + "type": "text", + "content": "Let me check the answer choices and see if any of these patterns match:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 484, + 155, + 510 + ], + "type": "list", + "angle": 0, + "index": 67, + "blocks": [ + { + "bbox": [ + 114, + 484, + 155, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 484, + 155, + 491 + ], + "spans": [ + { + "bbox": [ + 114, + 484, + 155, + 491 + ], + "type": "text", + "content": "A. ①③⑤, ②④⑥" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 491, + 154, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 491, + 154, + 497 + ], + "spans": [ + { + "bbox": [ + 114, + 491, + 154, + 497 + ], + "type": "text", + "content": "B. ①④⑥, ②③⑤" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 497, + 154, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 497, + 154, + 503 + ], + "spans": [ + { + "bbox": [ + 114, + 497, + 154, + 503 + ], + "type": "text", + "content": "C. ①③④, ②⑤⑥" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 503, + 155, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 503, + 155, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 503, + 155, + 510 + ], + "type": "text", + "content": "D. ①②④, ③⑤⑥" + } + ] + } + ], + "index": 66 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "spans": [ + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "type": "text", + "content": "Looking at option A: What could unite " + }, + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "type": "inline_equation", + "content": "①③⑤" + }, + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "type": "text", + "content": " ? They don't seem to share an obvious common pattern. What could unite " + }, + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "type": "inline_equation", + "content": "246?" + }, + { + "bbox": [ + 113, + 515, + 492, + 523 + ], + "type": "text", + "content": " Again, not seeing an obvious pattern." 
+ } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "spans": [ + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "type": "text", + "content": "Looking at option B: For " + }, + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "type": "inline_equation", + "content": "①④⑥" + }, + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "type": "text", + "content": " They all have some kind of irregular geometric shape. For " + }, + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "type": "inline_equation", + "content": "②③⑤" + }, + { + "bbox": [ + 114, + 523, + 465, + 529 + ], + "type": "text", + "content": " They all seem to have more regular geometric elements." + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "spans": [ + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "type": "text", + "content": "Looking at option C: For " + }, + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "type": "inline_equation", + "content": "①③④" + }, + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "type": "text", + "content": " : Not seeing a clear pattern. For " + }, + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "type": "inline_equation", + "content": "②⑤⑥" + }, + { + "bbox": [ + 114, + 529, + 336, + 535 + ], + "type": "text", + "content": " : Not seeing a clear pattern." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "spans": [ + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "text", + "content": "Looking at option D: For " + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "inline_equation", + "content": "124" + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "text", + "content": " These all have shapes with straight lines and angles. For " + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "inline_equation", + "content": "356" + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "text", + "content": " These all have some curved elements - " + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "inline_equation", + "content": "③" + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "text", + "content": " has an arc on top, " + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "inline_equation", + "content": "⑤" + }, + { + "bbox": [ + 114, + 535, + 496, + 541 + ], + "type": "text", + "content": " is a" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 114, + 541, + 174, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 541, + 174, + 548 + ], + "spans": [ + { + "bbox": [ + 114, + 541, + 174, + 548 + ], + "type": "text", + "content": "circle, " + }, + { + "bbox": [ + 114, + 541, + 174, + 548 + ], + "type": "inline_equation", + "content": "⑥" + }, + { + "bbox": [ + 114, + 541, + 174, + 548 + ], + "type": "text", + "content": " has curved sides" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 114, + 552, + 264, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 552, + 264, + 559 + ], + "spans": [ + { + "bbox": [ + 114, + 552, + 264, + 559 + ], + "type": "text", + "content": "I think option D might be the answer. 
The distinction seems to be:" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 114, + 559, + 248, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 559, + 248, + 566 + ], + "spans": [ + { + "bbox": [ + 114, + 559, + 248, + 566 + ], + "type": "text", + "content": "-Group " + }, + { + "bbox": [ + 114, + 559, + 248, + 566 + ], + "type": "inline_equation", + "content": "①②④" + }, + { + "bbox": [ + 114, + 559, + 248, + 566 + ], + "type": "text", + "content": " Figures with only straight lines and angles" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 114, + 566, + 258, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 566, + 258, + 572 + ], + "spans": [ + { + "bbox": [ + 114, + 566, + 258, + 572 + ], + "type": "text", + "content": "- Group " + }, + { + "bbox": [ + 114, + 566, + 258, + 572 + ], + "type": "inline_equation", + "content": "③⑤⑥" + }, + { + "bbox": [ + 114, + 566, + 258, + 572 + ], + "type": "text", + "content": " Figures that incorporate some curved elements" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 114, + 572, + 151, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 572, + 151, + 578 + ], + "spans": [ + { + "bbox": [ + 114, + 572, + 151, + 578 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 114, + 578, + 195, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 578, + 195, + 585 + ], + "spans": [ + { + "bbox": [ + 114, + 578, + 195, + 585 + ], + "type": "text", + "content": "Analyzing Six Geometric Figures" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 114, + 590, + 369, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 590, + 369, + 597 + ], + "spans": [ + { + "bbox": [ + 114, + 590, + 369, + 597 + ], + "type": "text", + "content": "I need to find a pattern that separates these six figures into two categories. 
Let me examine each figure carefully:" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 113, + 601, + 329, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 601, + 329, + 609 + ], + "spans": [ + { + "bbox": [ + 113, + 601, + 329, + 609 + ], + "type": "text", + "content": "Figure ①: Square with an internal shape that has straight edges (looks like a doorway or page)" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 113, + 609, + 309, + 640 + ], + "type": "list", + "angle": 0, + "index": 85, + "blocks": [ + { + "bbox": [ + 114, + 609, + 280, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 609, + 280, + 615 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 280, + 615 + ], + "type": "text", + "content": "Figure ②: Square with an L-shaped internal figure made of straight lines" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 114, + 615, + 307, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 615, + 307, + 621 + ], + "spans": [ + { + "bbox": [ + 114, + 615, + 307, + 621 + ], + "type": "text", + "content": "Figure ③: Square with a trapezoid/parallelogram shape that has a curved arc on top" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 114, + 621, + 309, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 621, + 309, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 621, + 309, + 628 + ], + "type": "text", + "content": "Figure ④: Square with zigzag/lightning-shaped internal figure made of straight lines" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 114, + 628, + 241, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 241, + 634 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 241, + 634 + ], + "type": "text", + "content": "Figure ⑤: Square with a circle (curved) containing an X" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 114, + 634, + 291, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 634, + 291, + 640 + ], + "spans": [ + { + "bbox": [ + 114, + 634, + 291, + 640 + ], + "type": "text", + "content": "Figure ⑥: Square with what appears to be a stylized figure with curved edges" + } + ] + } + ], + "index": 84 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 645, + 206, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 645, + 206, + 652 + ], + "spans": [ + { + "bbox": [ + 113, + 645, + 206, + 652 + ], + "type": "text", + "content": "Looking at potential pattern differences:" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 114, + 657, + 179, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 657, + 179, + 663 + ], + "spans": [ + { + "bbox": [ + 114, + 657, + 179, + 663 + ], + "type": "text", + "content": "Option D: ①②④, ③⑤⑥" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 114, + 663, + 485, + 683 + ], + "type": "list", + "angle": 0, + "index": 91, + "blocks": [ + { + "bbox": [ + 114, + 663, + 286, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 663, + 286, + 670 + ], + "spans": [ + { + "bbox": [ + 114, + 663, + 286, + 670 + ], + "type": "text", + "content": "- Group ①②④: All figures contain ONLY straight lines and angular shapes" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "spans": [ + { + "bbox": [ + 114, + 670, + 335, 
+ 677 + ], + "type": "text", + "content": "- Group " + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "inline_equation", + "content": "③⑤⑥" + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "text", + "content": " All figures incorporate curved elements (arc in " + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "inline_equation", + "content": "③" + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "text", + "content": " circle in " + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "inline_equation", + "content": "⑤" + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "text", + "content": " curved sides in " + }, + { + "bbox": [ + 114, + 670, + 335, + 677 + ], + "type": "inline_equation", + "content": "⑥" + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 114, + 677, + 485, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 677, + 485, + 683 + ], + "spans": [ + { + "bbox": [ + 114, + 677, + 485, + 683 + ], + "type": "text", + "content": "This is a clear and consistent pattern. The first group has exclusively straight-line geometry, while the second group all incorporate curved elements in their design." + } + ] + } + ], + "index": 90 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 688, + 250, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 688, + 250, + 696 + ], + "spans": [ + { + "bbox": [ + 113, + 688, + 250, + 696 + ], + "type": "text", + "content": "The other options don't reveal as clear a pattern distinction." + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 114, + 700, + 140, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 700, + 140, + 707 + ], + "spans": [ + { + "bbox": [ + 114, + 700, + 140, + 707 + ], + "type": "text", + "content": "Answer: D" + } + ] + } + ], + "index": 93 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 95 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 262, + 89, + 343, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 89, + 343, + 102 + ], + "spans": [ + { + "bbox": [ + 262, + 89, + 343, + 102 + ], + "type": "text", + "content": "Inductive (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "c6fc1a83d6a9bc93b8784359ac076f5ca84d132570716648ebe7672867c6bd45.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 723, + 465, + 736 + ], + "lines": [ + { + "bbox": [ + 143, + 723, + 465, + 736 + ], + "spans": [ + { + "bbox": [ + 143, + 723, + 465, + 736 + ], + "type": "text", + "content": "Figure 36: A sample error case of Inductive 
Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 80, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 123, + 106, + 255, + 132 + ], + "blocks": [ + { + "bbox": [ + 123, + 106, + 255, + 132 + ], + "lines": [ + { + "bbox": [ + 123, + 106, + 255, + 132 + ], + "spans": [ + { + "bbox": [ + 123, + 106, + 255, + 132 + ], + "type": "image", + "image_path": "1994f7acc55cf3c5043ee5cd9ca074533673ed5163c5a6430e8a63567a075735.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 144, + 135, + 233, + 160 + ], + "blocks": [ + { + "bbox": [ + 144, + 135, + 233, + 160 + ], + "lines": [ + { + "bbox": [ + 144, + 135, + 233, + 160 + ], + "spans": [ + { + "bbox": [ + 144, + 135, + 233, + 160 + ], + "type": "image", + "image_path": "b3a81caaab7e6d1a6308effe863bcf676eca4ca870e4b241ebb9c196a80297ae.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 154, + 162, + 223, + 167 + ], + "lines": [ + { + "bbox": [ + 154, + 162, + 223, + 167 + ], + "spans": [ + { + "bbox": [ + 154, + 162, + 223, + 167 + ], + "type": "text", + "content": "A B C D" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 261, + 110, + 485, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 110, + 485, + 140 + ], + "spans": [ + { + "bbox": [ + 261, + 110, + 485, + 140 + ], + "type": "text", + "content": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 261, + 149, + 331, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 149, + 331, + 161 + ], + "spans": [ + { + "bbox": [ + 261, + 149, + 331, + 161 + ], + "type": "text", + "content": "Ground Truth: (B)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 171, + 351, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 171, + 351, + 180 + ], + "spans": [ + { + "bbox": [ + 113, + 171, + 351, + 180 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze this pattern recognition problem step by step." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 183, + 497, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 183, + 497, + 198 + ], + "spans": [ + { + "bbox": [ + 113, + 183, + 497, + 198 + ], + "type": "text", + "content": "The question shows a sequence of 5 geometric figures, followed by a question mark. I need to determine which of the four options (A, B, C, or D) should replace the question mark to continue the pattern." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 202, + 153, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 202, + 153, + 209 + ], + "spans": [ + { + "bbox": [ + 113, + 202, + 153, + 209 + ], + "type": "text", + "content": "The sequence is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 210, + 238, + 240 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 113, + 210, + 175, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 210, + 175, + 215 + ], + "spans": [ + { + "bbox": [ + 113, + 210, + 175, + 215 + ], + "type": "text", + "content": "1. 
Two overlapping circles" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 216, + 175, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 216, + 175, + 221 + ], + "spans": [ + { + "bbox": [ + 113, + 216, + 175, + 221 + ], + "type": "text", + "content": "2. A triangle inside a circle" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 113, + 222, + 238, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 222, + 238, + 228 + ], + "spans": [ + { + "bbox": [ + 113, + 222, + 238, + 228 + ], + "type": "text", + "content": "3. A shape that looks like an oval with horizontal lines" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 228, + 231, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 228, + 231, + 234 + ], + "spans": [ + { + "bbox": [ + 113, + 228, + 231, + 234 + ], + "type": "text", + "content": "4. A square with a triangle inside or a diagonal line" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 234, + 188, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 234, + 188, + 240 + ], + "spans": [ + { + "bbox": [ + 114, + 234, + 188, + 240 + ], + "type": "text", + "content": "5. A pentagon (5-sided polygon)" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 240, + 197, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 240, + 197, + 246 + ], + "spans": [ + { + "bbox": [ + 113, + 240, + 197, + 246 + ], + "type": "text", + "content": "? (The figure we need to determine)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 251, + 135, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 251, + 135, + 258 + ], + "spans": [ + { + "bbox": [ + 113, + 251, + 135, + 258 + ], + "type": "text", + "content": "Options:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 258, + 197, + 277 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 113, + 258, + 188, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 258, + 188, + 265 + ], + "spans": [ + { + "bbox": [ + 113, + 258, + 188, + 265 + ], + "type": "text", + "content": "A. A hexagon (6-sided polygon)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 113, + 265, + 193, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 265, + 193, + 271 + ], + "spans": [ + { + "bbox": [ + 113, + 265, + 193, + 271 + ], + "type": "text", + "content": "B. A rectangle with a diagonal line" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 113, + 271, + 197, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 271, + 197, + 277 + ], + "spans": [ + { + "bbox": [ + 113, + 271, + 197, + 277 + ], + "type": "text", + "content": "C. A square with a grid/cross inside" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 277, + 145, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 277, + 145, + 283 + ], + "spans": [ + { + "bbox": [ + 113, + 277, + 145, + 283 + ], + "type": "text", + "content": "D. 
A triangle" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 289, + 171, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 289, + 171, + 295 + ], + "spans": [ + { + "bbox": [ + 113, + 289, + 171, + 295 + ], + "type": "text", + "content": "Let me look for patterns:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 295, + 423, + 314 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 113, + 295, + 423, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 295, + 423, + 301 + ], + "spans": [ + { + "bbox": [ + 113, + 295, + 423, + 301 + ], + "type": "text", + "content": "- If we're counting sides: circles (infinite sides), triangle (3 sides), oval (curvy, so infinite?), square/rectangle (4 sides), pentagon (5 sides)" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 301, + 377, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 301, + 377, + 308 + ], + "spans": [ + { + "bbox": [ + 113, + 301, + 377, + 308 + ], + "type": "text", + "content": "- If looking at the number of sides, it might be increasing: " + }, + { + "bbox": [ + 113, + 301, + 377, + 308 + ], + "type": "inline_equation", + "content": "\\infty, 3, \\infty, 4, 5\\ldots" + }, + { + "bbox": [ + 113, + 301, + 377, + 308 + ], + "type": "text", + "content": " which would suggest 6 sides next (hexagon)" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 308, + 310, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 308, + 310, + 314 + ], + "spans": [ + { + "bbox": [ + 113, + 308, + 310, + 314 + ], + "type": "text", + "content": "- Or it could be about complexity of the shapes or some transformation between them" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 319, + 426, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 319, + 426, + 327 + ], + "spans": [ + { + "bbox": [ + 113, + 319, + 426, + 327 + ], + "type": "text", + "content": "Looking at option A (hexagon), this would follow if the pattern is increasing the number of sides: pentagon (5 sides) " + }, + { + "bbox": [ + 113, + 319, + 426, + 327 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 113, + 319, + 426, + 327 + ], + "type": "text", + "content": " hexagon (6 sides)." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 331, + 365, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 331, + 365, + 338 + ], + "spans": [ + { + "bbox": [ + 113, + 331, + 365, + 338 + ], + "type": "text", + "content": "This seems like the most logical pattern - the progression of regular polygons with increasing number of sides:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 339, + 160, + 363 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 113, + 339, + 159, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 339, + 159, + 345 + ], + "spans": [ + { + "bbox": [ + 113, + 339, + 159, + 345 + ], + "type": "text", + "content": "- Pentagon (5 sides)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 345, + 159, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 345, + 159, + 351 + ], + "spans": [ + { + "bbox": [ + 114, + 345, + 159, + 351 + ], + "type": "text", + "content": "- Hexagon (6 sides)" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 351, + 160, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 351, + 160, + 357 + ], + "spans": [ + { + "bbox": [ + 114, + 351, + 160, + 357 + ], + "type": "text", + "content": "- Heptagon (7 sides)" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 357, + 158, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 357, + 158, + 363 + ], + "spans": [ + { + "bbox": [ + 113, + 357, + 158, + 363 + ], + "type": "text", + "content": "- Octagon (8 sides)" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 364, + 123, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 364, + 123, + 369 + ], + "spans": [ + { + "bbox": [ + 113, + 364, + 123, + 369 + ], + "type": "text", + "content": "etc." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 375, + 210, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 375, + 210, + 382 + ], + "spans": [ + { + "bbox": [ + 113, + 375, + 210, + 382 + ], + "type": "text", + "content": "So, the answer appears to be A (hexagon)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 387, + 294, + 412 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 113, + 387, + 249, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 387, + 249, + 394 + ], + "spans": [ + { + "bbox": [ + 113, + 387, + 249, + 394 + ], + "type": "text", + "content": "Let me double-check if there's another pattern I'm missing:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 394, + 294, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 394, + 294, + 400 + ], + "spans": [ + { + "bbox": [ + 113, + 394, + 294, + 400 + ], + "type": "text", + "content": "- The shapes aren't consistently \"shape within a shape\" so that's not the pattern" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 400, + 206, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 400, + 206, + 406 + ], + "spans": [ + { + "bbox": [ + 114, + 400, + 206, + 406 + ], + "type": "text", + "content": "- It's not about filled vs. 
unfilled shapes" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 406, + 231, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 406, + 231, + 412 + ], + "spans": [ + { + "bbox": [ + 114, + 406, + 231, + 412 + ], + "type": "text", + "content": "- It doesn't appear to be about rotation or reflection" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 418, + 277, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 418, + 277, + 425 + ], + "spans": [ + { + "bbox": [ + 113, + 418, + 277, + 425 + ], + "type": "text", + "content": "The increasing number of sides seems to be the most consistent pattern:" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 426, + 255, + 462 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 114, + 426, + 158, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 426, + 158, + 431 + ], + "spans": [ + { + "bbox": [ + 114, + 426, + 158, + 431 + ], + "type": "text", + "content": "-∞ sides (circles)" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 431, + 156, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 431, + 156, + 437 + ], + "spans": [ + { + "bbox": [ + 114, + 431, + 156, + 437 + ], + "type": "text", + "content": "-3 sides (triangle)" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 437, + 255, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 437, + 255, + 444 + ], + "spans": [ + { + "bbox": [ + 114, + 437, + 255, + 444 + ], + "type": "text", + "content": "- Curved shape (represents " + }, + { + "bbox": [ + 114, + 437, + 255, + 444 + ], + "type": "inline_equation", + "content": "\\infty" + }, + { + "bbox": [ + 114, + 437, + 255, + 444 + ], + "type": "text", + "content": " again or could be a special case)" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 444, + 177, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 444, + 177, + 450 + ], + "spans": [ + { + "bbox": [ + 114, + 444, + 177, + 450 + ], + "type": "text", + "content": "- 4 sides (square/rectangle)" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 450, + 160, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 450, + 160, + 456 + ], + "spans": [ + { + "bbox": [ + 114, + 450, + 160, + 456 + ], + "type": "text", + "content": "- 5 sides (pentagon)" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 456, + 225, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 456, + 225, + 462 + ], + "spans": [ + { + "bbox": [ + 114, + 456, + 225, + 462 + ], + "type": "text", + "content": "- ? (logically 6 sides, which would be a hexagon)" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 467, + 259, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 467, + 259, + 475 + ], + "spans": [ + { + "bbox": [ + 113, + 467, + 259, + 475 + ], + "type": "text", + "content": "Therefore, option A (hexagon) appears to be the correct answer." 
+ } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 475, + 152, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 475, + 152, + 480 + ], + "spans": [ + { + "bbox": [ + 114, + 475, + 152, + 480 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 113, + 480, + 386, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 480, + 386, + 487 + ], + "spans": [ + { + "bbox": [ + 113, + 480, + 386, + 487 + ], + "type": "text", + "content": "I need to identify the pattern in this sequence of figures and determine which option should replace the question mark." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 113, + 492, + 181, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 492, + 181, + 499 + ], + "spans": [ + { + "bbox": [ + 113, + 492, + 181, + 499 + ], + "type": "text", + "content": "Let me analyze the sequence:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 499, + 201, + 534 + ], + "type": "list", + "angle": 0, + "index": 61, + "blocks": [ + { + "bbox": [ + 114, + 499, + 175, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 499, + 175, + 505 + ], + "spans": [ + { + "bbox": [ + 114, + 499, + 175, + 505 + ], + "type": "text", + "content": "1. Two overlapping circles" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 505, + 175, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 505, + 175, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 505, + 175, + 510 + ], + "type": "text", + "content": "2. A triangle inside a circle" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 510, + 187, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 510, + 187, + 517 + ], + "spans": [ + { + "bbox": [ + 114, + 510, + 187, + 517 + ], + "type": "text", + "content": "3. An oval with horizontal lines" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 517, + 201, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 517, + 201, + 522 + ], + "spans": [ + { + "bbox": [ + 114, + 517, + 201, + 522 + ], + "type": "text", + "content": "4. 
A quadrilateral with a diagonal line" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 522, + 187, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 522, + 187, + 528 + ], + "spans": [ + { + "bbox": [ + 114, + 522, + 187, + 528 + ], + "type": "text", + "content": "5.A pentagon (5-sided polygon)" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 528, + 187, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 528, + 187, + 534 + ], + "spans": [ + { + "bbox": [ + 114, + 528, + 187, + 534 + ], + "type": "text", + "content": "6.2 (This is what I need to find)" + } + ] + } + ], + "index": 60 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 541, + 337, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 541, + 337, + 548 + ], + "spans": [ + { + "bbox": [ + 113, + 541, + 337, + 548 + ], + "type": "text", + "content": "Looking at these shapes, I notice a potential pattern in the number of sides of the primary shapes:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 548, + 211, + 579 + ], + "type": "list", + "angle": 0, + "index": 68, + "blocks": [ + { + "bbox": [ + 114, + 548, + 195, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 548, + 195, + 554 + ], + "spans": [ + { + "bbox": [ + 114, + 548, + 195, + 554 + ], + "type": "text", + "content": "- First shape: Circles (infinite sides)" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 554, + 192, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 554, + 192, + 560 + ], + "spans": [ + { + "bbox": [ + 114, + 554, + 192, + 560 + ], + "type": "text", + "content": "- Second shape: Triangle (3 sides)" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 560, + 211, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 560, + 211, + 567 + ], + "spans": [ + { + "bbox": [ + 114, + 560, + 211, + 567 + ], + "type": "text", + "content": "- Third shape: Oval (curved shape)" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 114, + 567, + 201, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 567, + 201, + 573 + ], + "spans": [ + { + "bbox": [ + 114, + 567, + 201, + 573 + ], + "type": "text", + "content": "- Fourth shape: Quadrilateral (4 sides)" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 114, + 573, + 187, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 573, + 187, + 579 + ], + "spans": [ + { + "bbox": [ + 114, + 573, + 187, + 579 + ], + "type": "text", + "content": "- Fifth shape: Pentagon (5 sides)" + } + ] + } + ], + "index": 67 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 584, + 497, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 584, + 497, + 597 + ], + "spans": [ + { + "bbox": [ + 113, + 584, + 497, + 597 + ], + "type": "text", + "content": "It appears the pattern is showing an increase in the number of sides as we move from the triangle to quadrilateral to pentagon. Following this pattern, the next shape should have 6 sides." 
+ } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 113, + 603, + 168, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 603, + 168, + 609 + ], + "spans": [ + { + "bbox": [ + 113, + 603, + 168, + 609 + ], + "type": "text", + "content": "Looking at the options:" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 114, + 609, + 206, + 634 + ], + "type": "list", + "angle": 0, + "index": 75, + "blocks": [ + { + "bbox": [ + 114, + 609, + 205, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 609, + 205, + 616 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 205, + 616 + ], + "type": "text", + "content": "- Option A: Hexagon (6-sided polygon)" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 114, + 616, + 206, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 206, + 622 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 206, + 622 + ], + "type": "text", + "content": "-Option B:Rectangle with diagonal line" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 114, + 622, + 180, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 622, + 180, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 622, + 180, + 628 + ], + "type": "text", + "content": "- Option C: Square with grid" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 114, + 628, + 162, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 162, + 634 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 162, + 634 + ], + "type": "text", + "content": "-Option D:Triangle" + } + ] + } + ], + "index": 74 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 639, + 436, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 639, + 436, + 647 + ], + "spans": [ + { + "bbox": [ + 113, + 639, + 436, + 647 + ], + "type": "text", + "content": "Since the logical progression is to a 6-sided figure after the 5-sided pentagon, option A (hexagon) is the correct answer to continue the pattern." + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 113, + 652, + 141, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 652, + 141, + 658 + ], + "spans": [ + { + "bbox": [ + 113, + 652, + 141, + 658 + ], + "type": "text", + "content": "Answer: A" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 113, + 662, + 232, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 662, + 232, + 672 + ], + "spans": [ + { + "bbox": [ + 113, + 662, + 232, + 672 + ], + "type": "text", + "content": "Error Category: Reasoning Error" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 113, + 673, + 497, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 497, + 712 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 497, + 712 + ], + "type": "text", + "content": "Error Reason: The model overcomplicates the pattern and misinterprets the progression of shapes. It assumes the sequence is based on increasing the number of polygon sides and selects the hexagon (option A), expecting a 6-sided figure to follow the 5-sided pentagon. However, the actual pattern does not follow this sequence, as could be reasoned from the first three figures." 
+ } + ] + } + ], + "index": 79 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 81 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 262, + 89, + 342, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 89, + 342, + 102 + ], + "spans": [ + { + "bbox": [ + 262, + 89, + 342, + 102 + ], + "type": "text", + "content": "Inductive (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "6dcacdbc633bf2d93fe3048943845a396a23ab6786fee3bb15fbaa39e85fab8c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 140, + 105, + 197, + 160 + ], + "blocks": [ + { + "bbox": [ + 140, + 105, + 197, + 160 + ], + "lines": [ + { + "bbox": [ + 140, + 105, + 197, + 160 + ], + "spans": [ + { + "bbox": [ + 140, + 105, + 197, + 160 + ], + "type": "image", + "image_path": "17adf262a76fecf8616abb3242adf93be470dbd486216a32ecc0b909fe895b5c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 723, + 469, + 736 + ], + "lines": [ + { + "bbox": [ + 140, + 723, + 469, + 736 + ], + "spans": [ + { + "bbox": [ + 140, + 723, + 469, + 736 + ], + "type": "text", + "content": "Figure 37: A sample correct case of Inductive Reasoning (difficulty: Hard)." + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 132, + 163, + 205, + 185 + ], + "blocks": [ + { + "bbox": [ + 132, + 163, + 205, + 185 + ], + "lines": [ + { + "bbox": [ + 132, + 163, + 205, + 185 + ], + "spans": [ + { + "bbox": [ + 132, + 163, + 205, + 185 + ], + "type": "image", + "image_path": "e9bc9fcac4a2371d182b0f22eb739189041818bfaeb2e9d2b079bab6c84fa652.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 218, + 119, + 489, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 119, + 489, + 140 + ], + "spans": [ + { + "bbox": [ + 218, + 119, + 489, + 140 + ], + "type": "text", + "content": "Question: Choose the most appropriate option from the four given choices to fill in the question mark, so that the figures follow a pattern." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 219, + 148, + 288, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 148, + 288, + 159 + ], + "spans": [ + { + "bbox": [ + 219, + 148, + 288, + 159 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 191, + 496, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 191, + 496, + 205 + ], + "spans": [ + { + "bbox": [ + 113, + 191, + 496, + 205 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: This question asks me to identify a pattern in a 3x3 grid of figures and then determine which of the four options (A, B, C, D) should replace the question mark to maintain that pattern." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 210, + 258, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 210, + 258, + 218 + ], + "spans": [ + { + "bbox": [ + 114, + 210, + 258, + 218 + ], + "type": "text", + "content": "Let me analyze each row of the grid to understand the pattern:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 223, + 238, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 223, + 238, + 230 + ], + "spans": [ + { + "bbox": [ + 114, + 223, + 238, + 230 + ], + "type": "text", + "content": "Row 1: Contains line drawings or outlines (thin lines)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 230, + 219, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 230, + 219, + 236 + ], + "spans": [ + { + "bbox": [ + 114, + 230, + 219, + 236 + ], + "type": "text", + "content": "Row 2: Contains solid black shapes (filled in)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 236, + 301, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 236, + 301, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 236, + 301, + 243 + ], + "type": "text", + "content": "Row 3: Contains circular patterns, with the third position having a question mark." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 247, + 331, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 247, + 331, + 255 + ], + "spans": [ + { + "bbox": [ + 114, + 247, + 331, + 255 + ], + "type": "text", + "content": "Now I need to determine what pattern connects each column and row to find the right answer." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 260, + 171, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 260, + 171, + 266 + ], + "spans": [ + { + "bbox": [ + 114, + 260, + 171, + 266 + ], + "type": "text", + "content": "Looking at each column:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 266, + 321, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 266, + 321, + 273 + ], + "spans": [ + { + "bbox": [ + 114, + 266, + 321, + 273 + ], + "type": "text", + "content": "- Column 1: A line drawing at top, a solid shape in middle, and a circular pattern at bottom" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 273, + 204, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 273, + 204, + 279 + ], + "spans": [ + { + "bbox": [ + 114, + 273, + 204, + 279 + ], + "type": "text", + "content": "- Column 2: Same structure as column 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 279, + 329, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 279, + 329, + 286 + ], + "spans": [ + { + "bbox": [ + 114, + 279, + 329, + 286 + ], + "type": "text", + "content": "- Column 3: Same structure for the first two rows, but the third position has the question mark" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 291, + 251, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 291, + 251, + 297 + ], + "spans": [ + { + "bbox": [ + 114, + 291, + 251, + 297 + ], + "type": "text", + "content": "Now let's look more closely at the circular patterns in row 3:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 297, + 325, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 297, + 325, + 304 + ], + "spans": [ + { + "bbox": [ + 114, + 297, + 325, + 304 + ], + "type": "text", + "content": "- Position (3,1): Shows circles arranged in a pattern that seems to form two connected groups" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 304, + 315, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 304, + 315, + 310 + ], + "spans": [ + { + "bbox": [ + 114, + 304, + 315, + 310 + ], + "type": "text", + "content": "- Position (3,2): Shows circles arranged in a different pattern, also with connected groups" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 315, + 449, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 315, + 449, + 323 + ], + "spans": [ + { + "bbox": [ + 114, + 315, + 449, + 323 + ], + "type": "text", + "content": "I need to determine what the relationship is between the patterns in the first two rows and how that might indicate what should be in position (3,3)." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 328, + 169, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 328, + 169, + 335 + ], + "spans": [ + { + "bbox": [ + 114, + 328, + 169, + 335 + ], + "type": "text", + "content": "Looking at row 1 again:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 335, + 203, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 335, + 203, + 341 + ], + "spans": [ + { + "bbox": [ + 114, + 335, + 203, + 341 + ], + "type": "text", + "content": "- (1,1): Lines forming an angular shape" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 341, + 216, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 341, + 216, + 346 + ], + "spans": [ + { + "bbox": [ + 114, + 341, + 216, + 346 + ], + "type": "text", + "content": "- (1,2): Lines forming another angular shape" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 346, + 236, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 346, + 236, + 353 + ], + "spans": [ + { + "bbox": [ + 114, + 346, + 236, + 353 + ], + "type": "text", + "content": "- (1,3): Lines forming a third angular shape with an X" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 358, + 130, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 358, + 130, + 364 + ], + "spans": [ + { + "bbox": [ + 114, + 358, + 130, + 364 + ], + "type": "text", + "content": "Row 2:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 365, + 189, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 365, + 189, + 371 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 189, + 371 + ], + "type": "text", + "content": "- (2,1): Solid black angular shape" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 371, + 189, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 371, + 189, + 377 + ], + "spans": [ + { + "bbox": [ + 114, + 371, + 189, + 377 + ], + "type": "text", + "content": "- (2,2): Solid black angular shape" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 377, + 189, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 377, + 189, + 384 + ], + "spans": [ + { + "bbox": [ + 114, + 377, + 189, + 384 + ], + "type": "text", + "content": "- (2,3): Solid black angular shape" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 389, + 425, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 389, + 425, + 397 + ], + "spans": [ + { + "bbox": [ + 114, + 389, + 425, + 397 + ], + "type": "text", + "content": "I notice that each row seems to maintain the same visual \"language\" (lines, solid shapes, or circles), but the specific arrangement changes." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 401, + 320, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 401, + 320, + 409 + ], + "spans": [ + { + "bbox": [ + 114, + 401, + 320, + 409 + ], + "type": "text", + "content": "Let me check if there's a transformation happening across each row or down each column." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 414, + 345, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 414, + 345, + 422 + ], + "spans": [ + { + "bbox": [ + 114, + 414, + 345, + 422 + ], + "type": "text", + "content": "One possibility is that row 3 should continue some pattern from rows 1 and 2. Looking at the options:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 426, + 204, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 426, + 204, + 433 + ], + "spans": [ + { + "bbox": [ + 114, + 426, + 204, + 433 + ], + "type": "text", + "content": "Option A: Circles arranged in a pattern" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 434, + 204, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 434, + 204, + 441 + ], + "spans": [ + { + "bbox": [ + 114, + 434, + 204, + 441 + ], + "type": "text", + "content": "Option B: Circles arranged in a pattern" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 441, + 204, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 441, + 204, + 447 + ], + "spans": [ + { + "bbox": [ + 114, + 441, + 204, + 447 + ], + "type": "text", + "content": "Option C: Circles arranged in a pattern" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 447, + 204, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 447, + 204, + 453 + ], + "spans": [ + { + "bbox": [ + 114, + 447, + 204, + 453 + ], + "type": "text", + "content": "Option D: Circles arranged in a pattern" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 457, + 228, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 457, + 228, + 464 + ], + "spans": [ + { + "bbox": [ + 114, + 457, + 228, + 464 + ], + "type": "text", + "content": "Looking purely at the row 3 pattern development:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 464, + 264, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 464, + 264, + 470 + ], + "spans": [ + { + "bbox": [ + 114, + 464, + 264, + 470 + ], + "type": "text", + "content": "- (3,1) has circles arranged in what appears to be a specific pattern" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 470, + 223, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 470, + 223, + 476 + ], + "spans": [ + { + "bbox": [ + 114, + 470, + 223, + 476 + ], + "type": "text", + "content": "- (3,2) has circles arranged in a different pattern" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 476, + 250, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 476, + 250, + 483 + ], + "spans": [ + { + "bbox": [ + 114, + 476, + 250, + 483 + ], + "type": "text", + "content": "- (3,3) should continue this progression in some logical way" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 487, + 496, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 487, + 496, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 487, + 496, + 502 + ], + "type": "text", + "content": "Given the progression from (3,1) to (3,2), and looking at the overall pattern of the grid, the most logical answer appears to be option C, as it seems to follow the pattern of arrangements in row 3." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 506, + 496, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 506, + 496, + 519 + ], + "spans": [ + { + "bbox": [ + 114, + 506, + 496, + 519 + ], + "type": "text", + "content": "However, I'm not completely confident without being able to precisely determine the transformation rule. Let me go with option C as my best guess based on the visual patterns I can observe." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 519, + 151, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 519, + 151, + 525 + ], + "spans": [ + { + "bbox": [ + 114, + 519, + 151, + 525 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 525, + 204, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 525, + 204, + 532 + ], + "spans": [ + { + "bbox": [ + 114, + 525, + 204, + 532 + ], + "type": "text", + "content": "# Pattern Analysis for Figure Sequence" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 537, + 337, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 537, + 337, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 537, + 337, + 544 + ], + "type": "text", + "content": "I need to identify the pattern in this " + }, + { + "bbox": [ + 114, + 537, + 337, + 544 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 114, + 537, + 337, + 544 + ], + "type": "text", + "content": " grid to determine what should replace the question mark." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 550, + 168, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 550, + 168, + 556 + ], + "spans": [ + { + "bbox": [ + 114, + 550, + 168, + 556 + ], + "type": "text", + "content": "Analyzing the rows:" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 556, + 238, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 556, + 238, + 563 + ], + "spans": [ + { + "bbox": [ + 114, + 556, + 238, + 563 + ], + "type": "text", + "content": "1. 
**Row 1**: Contains simple line drawings (outlines)" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 563, + 245, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 563, + 245, + 569 + ], + "spans": [ + { + "bbox": [ + 114, + 563, + 245, + 569 + ], + "type": "text", + "content": "2.**Row 2**: Contains solid black shapes (filled versions)" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 569, + 220, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 569, + 220, + 575 + ], + "spans": [ + { + "bbox": [ + 114, + 569, + 220, + 575 + ], + "type": "text", + "content": "3.**Row " + }, + { + "bbox": [ + 114, + 569, + 220, + 575 + ], + "type": "inline_equation", + "content": "3^{**}" + }, + { + "bbox": [ + 114, + 569, + 220, + 575 + ], + "type": "text", + "content": ": Contains arrangements of circles" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 581, + 176, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 581, + 176, + 587 + ], + "spans": [ + { + "bbox": [ + 114, + 581, + 176, + 587 + ], + "type": "text", + "content": "Analyzing the columns:" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 587, + 245, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 587, + 245, + 594 + ], + "spans": [ + { + "bbox": [ + 114, + 587, + 245, + 594 + ], + "type": "text", + "content": "Each column follows a consistent transformation pattern:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 594, + 207, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 594, + 207, + 599 + ], + "spans": [ + { + "bbox": [ + 114, + 594, + 207, + 599 + ], + "type": "text", + "content": "- The top row shows a basic line pattern" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 600, + 242, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 600, + 242, + 605 + ], + "spans": [ + { + "bbox": [ + 114, + 600, + 242, + 605 + ], + "type": "text", + "content": "- The middle row shows the same pattern filled in black" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 606, + 248, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 606, + 248, + 612 + ], + "spans": [ + { + "bbox": [ + 114, + 606, + 248, + 612 + ], + "type": "text", + "content": "- The bottom row represents the same pattern using circles" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 617, + 188, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 617, + 188, + 624 + ], + "spans": [ + { + "bbox": [ + 114, + 617, + 188, + 624 + ], + "type": "text", + "content": "## Looking at the third column:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 624, + 248, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 624, + 248, + 630 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 248, + 630 + ], + "type": "text", + "content": "- Position (1,3): A line pattern with an \"X\" and some angles" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 630, + 268, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 630, + 268, + 636 + ], + "spans": [ + { + "bbox": [ + 114, + 630, + 268, + 636 + ], + "type": "text", + "content": "- Position (2,3): A solid version with square and triangular elements" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 636, + 274, + 643 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 636, + 274, + 643 + ], + "spans": [ + { + "bbox": [ + 114, + 636, + 274, + 643 + ], + "type": "text", + "content": "- Position (3,3): Should show the same pattern represented with circles" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 648, + 176, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 648, + 176, + 654 + ], + "spans": [ + { + "bbox": [ + 114, + 648, + 176, + 654 + ], + "type": "text", + "content": "Examining the options:" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 654, + 263, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 654, + 263, + 661 + ], + "spans": [ + { + "bbox": [ + 114, + 654, + 263, + 661 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 654, + 263, + 661 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 114, + 654, + 263, + 661 + ], + "type": "text", + "content": "Option A\\*\\*: Circles arranged in an L-shape with some outliers" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 661, + 231, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 661, + 231, + 668 + ], + "spans": [ + { + "bbox": [ + 114, + 661, + 231, + 668 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 661, + 231, + 668 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 114, + 661, + 231, + 668 + ], + "type": "text", + "content": "Option B\\*\\*: Circles arranged in a zigzag pattern" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 668, + 299, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 668, + 299, + 673 + ], + "spans": [ + { + "bbox": [ + 114, + 668, + 299, + 673 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 668, + 299, + 673 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 114, + 668, + 299, + 673 + ], + "type": "text", + "content": "Option C\\*: Circles arranged in a figure-8 or loop pattern with outlying circles" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 673, + 275, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 673, + 275, + 680 + ], + "spans": [ + { + "bbox": [ + 114, + 673, + 275, + 680 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 114, + 673, + 275, + 680 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 114, + 673, + 275, + 680 + ], + "type": "text", + "content": "Option D\\*\\*: Circles arranged in a C-shape with additional elements" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 685, + 496, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 685, + 496, + 699 + ], + "spans": [ + { + "bbox": [ + 114, + 685, + 496, + 699 + ], + "type": "text", + "content": "Looking at how the circle arrangements in positions (3,1) and (3,2) correspond to their respective shapes above them, option C best represents the pattern in the third column. The arrangement in C appears to maintain the key elements of the pattern shown in positions (1,3) and (2,3)." 
+ } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 704, + 140, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 704, + 140, + 710 + ], + "spans": [ + { + "bbox": [ + 114, + 704, + 140, + 710 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 64 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 66 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 269, + 89, + 336, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 89, + 336, + 102 + ], + "spans": [ + { + "bbox": [ + 269, + 89, + 336, + 102 + ], + "type": "text", + "content": "Spatial (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "80da2aa740657911acc5a646847ccea63c96b896cfdddfe6c9198ef82424b766.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 167, + 109, + 242, + 203 + ], + "blocks": [ + { + "bbox": [ + 167, + 109, + 242, + 203 + ], + "lines": [ + { + "bbox": [ + 167, + 109, + 242, + 203 + ], + "spans": [ + { + "bbox": [ + 167, + 109, + 242, + 203 + ], + "type": "image", + "image_path": "904965bbf4265f24d2577a52604da12422da4d054e267cebc6d8b1a6743bfb7f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 723, + 459, + 736 + ], + "lines": [ + { + "bbox": [ + 151, + 723, + 459, + 736 + ], + "spans": [ + { + "bbox": [ + 151, + 723, + 459, + 736 + ], + "type": "text", + "content": "Figure 38: A sample error case of Spatial Reasoning (difficulty: Easy)." + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 274, + 137, + 457, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 137, + 457, + 148 + ], + "spans": [ + { + "bbox": [ + 274, + 137, + 457, + 148 + ], + "type": "text", + "content": "Question: How many blocks are in this 3D tower?" 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 276, + 148, + 383, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 148, + 383, + 158 + ], + "spans": [ + { + "bbox": [ + 276, + 148, + 383, + 158 + ], + "type": "text", + "content": "Options:A:7.B:8.C:9.D:10" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 275, + 167, + 344, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 275, + 167, + 344, + 178 + ], + "spans": [ + { + "bbox": [ + 275, + 167, + 344, + 178 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 207, + 422, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 207, + 422, + 216 + ], + "spans": [ + { + "bbox": [ + 113, + 207, + 422, + 216 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: I need to count the number of blocks in this 3D tower. Let me analyze the structure carefully." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 220, + 448, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 220, + 448, + 228 + ], + "spans": [ + { + "bbox": [ + 113, + 220, + 448, + 228 + ], + "type": "text", + "content": "The tower appears to have several levels with blocks arranged in a step-like pattern. I'll try to count methodically by identifying each visible block." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 232, + 165, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 232, + 165, + 239 + ], + "spans": [ + { + "bbox": [ + 113, + 232, + 165, + 239 + ], + "type": "text", + "content": "Looking at the image:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 239, + 292, + 258 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 114, + 239, + 292, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 239, + 292, + 246 + ], + "spans": [ + { + "bbox": [ + 114, + 239, + 292, + 246 + ], + "type": "text", + "content": "- At the bottom level (ground level), I can see 3 blocks arranged in an L-shape." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 246, + 211, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 246, + 211, + 251 + ], + "spans": [ + { + "bbox": [ + 114, + 246, + 211, + 251 + ], + "type": "text", + "content": "- At the second level up, I can see 2 blocks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 251, + 193, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 251, + 193, + 258 + ], + "spans": [ + { + "bbox": [ + 114, + 251, + 193, + 258 + ], + "type": "text", + "content": "- At the top level, I can see 1 block." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 263, + 188, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 263, + 188, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 263, + 188, + 270 + ], + "type": "text", + "content": "So far that's " + }, + { + "bbox": [ + 113, + 263, + 188, + 270 + ], + "type": "inline_equation", + "content": "3 + 2 + 1 = 6" + }, + { + "bbox": [ + 113, + 263, + 188, + 270 + ], + "type": "text", + "content": " blocks." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 275, + 411, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 275, + 411, + 283 + ], + "spans": [ + { + "bbox": [ + 113, + 275, + 411, + 283 + ], + "type": "text", + "content": "However, I need to be careful as this is a 3D structure and there might be blocks that are partially or completely hidden from view." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 288, + 460, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 288, + 460, + 295 + ], + "spans": [ + { + "bbox": [ + 113, + 288, + 460, + 295 + ], + "type": "text", + "content": "Bottom level: I see what appears to be an L-shape with 3 visible cubes. There might be a 4th cube completing the square " + }, + { + "bbox": [ + 113, + 288, + 460, + 295 + ], + "type": "inline_equation", + "content": "(2\\times 2)" + }, + { + "bbox": [ + 113, + 288, + 460, + 295 + ], + "type": "text", + "content": ", but it's not clearly visible." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 295, + 175, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 295, + 175, + 300 + ], + "spans": [ + { + "bbox": [ + 114, + 295, + 175, + 300 + ], + "type": "text", + "content": "Middle level: I see 2 cubes." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 300, + 166, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 300, + 166, + 306 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 166, + 306 + ], + "type": "text", + "content": "Top level: I see 1 cube." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 306, + 496, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 306, + 496, + 319 + ], + "spans": [ + { + "bbox": [ + 114, + 306, + 496, + 319 + ], + "type": "text", + "content": "Given the perspective and the way 3D blocks are usually represented, it seems most likely that there are 7 blocks total " + }, + { + "bbox": [ + 114, + 306, + 496, + 319 + ], + "type": "inline_equation", + "content": "(3 + 3 + 1)" + }, + { + "bbox": [ + 114, + 306, + 496, + 319 + ], + "type": "text", + "content": " but I need to be very careful about blocks that may be hidden from view." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 325, + 221, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 325, + 221, + 331 + ], + "spans": [ + { + "bbox": [ + 114, + 325, + 221, + 331 + ], + "type": "text", + "content": "Actually, let me count one more time carefully:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 331, + 221, + 368 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 114, + 331, + 172, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 331, + 172, + 337 + ], + "spans": [ + { + "bbox": [ + 114, + 331, + 172, + 337 + ], + "type": "text", + "content": "1. Bottom front left block" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 337, + 176, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 337, + 176, + 343 + ], + "spans": [ + { + "bbox": [ + 114, + 337, + 176, + 343 + ], + "type": "text", + "content": "2. 
Bottom front right block" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 343, + 221, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 343, + 221, + 350 + ], + "spans": [ + { + "bbox": [ + 114, + 343, + 221, + 350 + ], + "type": "text", + "content": "3. Bottom back left block (creating the L-shape)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 350, + 163, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 350, + 163, + 356 + ], + "spans": [ + { + "bbox": [ + 114, + 350, + 163, + 356 + ], + "type": "text", + "content": "4.Middle front block" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 356, + 163, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 356, + 163, + 361 + ], + "spans": [ + { + "bbox": [ + 114, + 356, + 163, + 361 + ], + "type": "text", + "content": "5.Middle back block" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 361, + 143, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 361, + 143, + 368 + ], + "spans": [ + { + "bbox": [ + 114, + 361, + 143, + 368 + ], + "type": "text", + "content": "6. Top block" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 368, + 483, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 368, + 483, + 375 + ], + "spans": [ + { + "bbox": [ + 113, + 368, + 483, + 375 + ], + "type": "text", + "content": "7. Bottom back right block (which would complete the " + }, + { + "bbox": [ + 113, + 368, + 483, + 375 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 113, + 368, + 483, + 375 + ], + "type": "text", + "content": " square at the bottom) - this one is not clearly visible but would typically be present in a stable structure" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 380, + 160, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 380, + 160, + 388 + ], + "spans": [ + { + "bbox": [ + 113, + 380, + 160, + 388 + ], + "type": "text", + "content": "That gives 7 blocks." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 392, + 250, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 392, + 250, + 399 + ], + "spans": [ + { + "bbox": [ + 114, + 392, + 250, + 399 + ], + "type": "text", + "content": "After careful consideration, I believe the answer is 7 blocks." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 404, + 291, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 404, + 291, + 413 + ], + "spans": [ + { + "bbox": [ + 113, + 404, + 291, + 413 + ], + "type": "text", + "content": "Actually, I'm not confident about my count. 
Let me try to visualize this again:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 418, + 179, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 418, + 179, + 423 + ], + "spans": [ + { + "bbox": [ + 114, + 418, + 179, + 423 + ], + "type": "text", + "content": "The structure appears to be:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 423, + 241, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 423, + 241, + 430 + ], + "spans": [ + { + "bbox": [ + 114, + 423, + 241, + 430 + ], + "type": "text", + "content": "- Bottom layer: Looks like a " + }, + { + "bbox": [ + 114, + 423, + 241, + 430 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 114, + 423, + 241, + 430 + ], + "type": "text", + "content": " arrangement (4 blocks)" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 430, + 201, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 430, + 201, + 437 + ], + "spans": [ + { + "bbox": [ + 114, + 430, + 201, + 437 + ], + "type": "text", + "content": "- Middle layer: Appears to be 2 blocks" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 437, + 159, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 437, + 159, + 443 + ], + "spans": [ + { + "bbox": [ + 114, + 437, + 159, + 443 + ], + "type": "text", + "content": "-Top layer:1 block" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 448, + 217, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 448, + 217, + 455 + ], + "spans": [ + { + "bbox": [ + 114, + 448, + 217, + 455 + ], + "type": "text", + "content": "That would give us " + }, + { + "bbox": [ + 114, + 448, + 217, + 455 + ], + "type": "inline_equation", + "content": "4 + 2 + 1 = 7" + }, + { + "bbox": [ + 114, + 448, + 217, + 455 + ], + "type": "text", + "content": " blocks total." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 460, + 311, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 460, + 311, + 468 + ], + "spans": [ + { + "bbox": [ + 113, + 460, + 311, + 468 + ], + "type": "text", + "content": "But wait, the image shows what looks like steps. Let me try to count each visible cube:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 468, + 158, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 468, + 158, + 473 + ], + "spans": [ + { + "bbox": [ + 114, + 468, + 158, + 473 + ], + "type": "text", + "content": "1. Bottom front left" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 473, + 162, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 473, + 162, + 479 + ], + "spans": [ + { + "bbox": [ + 114, + 473, + 162, + 479 + ], + "type": "text", + "content": "2. 
Bottom front right" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 479, + 147, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 479, + 147, + 485 + ], + "spans": [ + { + "bbox": [ + 114, + 479, + 147, + 485 + ], + "type": "text", + "content": "3.Middle left" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 485, + 149, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 485, + 149, + 491 + ], + "spans": [ + { + "bbox": [ + 114, + 485, + 149, + 491 + ], + "type": "text", + "content": "4.Middle right" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 491, + 147, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 491, + 147, + 498 + ], + "spans": [ + { + "bbox": [ + 114, + 491, + 147, + 498 + ], + "type": "text", + "content": "5. Top middle" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 498, + 198, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 498, + 198, + 504 + ], + "spans": [ + { + "bbox": [ + 114, + 498, + 198, + 504 + ], + "type": "text", + "content": "6. Bottom back left (partially visible)" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 504, + 203, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 504, + 203, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 504, + 203, + 510 + ], + "type": "text", + "content": "7. Bottom back right (might be hidden)" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 510, + 339, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 510, + 339, + 517 + ], + "spans": [ + { + "bbox": [ + 114, + 510, + 339, + 517 + ], + "type": "text", + "content": "Based on what I can see in the image, there appear to be 7 blocks, which corresponds to option (A)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 517, + 152, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 517, + 152, + 523 + ], + "spans": [ + { + "bbox": [ + 114, + 517, + 152, + 523 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 523, + 195, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 523, + 195, + 529 + ], + "spans": [ + { + "bbox": [ + 114, + 523, + 195, + 529 + ], + "type": "text", + "content": "# Counting Blocks in the 3D Tower" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 113, + 534, + 427, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 534, + 427, + 541 + ], + "spans": [ + { + "bbox": [ + 113, + 534, + 427, + 541 + ], + "type": "text", + "content": "To solve this problem, I need to carefully count all the blocks in this 3D structure, including any that might be partially hidden from view." 
+ } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 113, + 541, + 291, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 541, + 291, + 548 + ], + "spans": [ + { + "bbox": [ + 113, + 541, + 291, + 548 + ], + "type": "text", + "content": "Let me work through this systematically by analyzing each level of the tower:" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 553, + 188, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 553, + 188, + 559 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 188, + 559 + ], + "type": "text", + "content": "Bottom Level (ground level):" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 559, + 246, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 559, + 246, + 566 + ], + "spans": [ + { + "bbox": [ + 114, + 559, + 246, + 566 + ], + "type": "text", + "content": "I can see what appears to be a " + }, + { + "bbox": [ + 114, + 559, + 246, + 566 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 114, + 559, + 246, + 566 + ], + "type": "text", + "content": " arrangement of blocks:" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 566, + 195, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 566, + 195, + 571 + ], + "spans": [ + { + "bbox": [ + 114, + 566, + 195, + 571 + ], + "type": "text", + "content": "That's 4 blocks on the bottom level." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 578, + 154, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 578, + 154, + 584 + ], + "spans": [ + { + "bbox": [ + 114, + 578, + 154, + 584 + ], + "type": "text", + "content": "Middle Level:" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 584, + 185, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 584, + 185, + 590 + ], + "spans": [ + { + "bbox": [ + 114, + 584, + 185, + 590 + ], + "type": "text", + "content": "Moving up one level, I can see:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 590, + 222, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 590, + 222, + 596 + ], + "spans": [ + { + "bbox": [ + 114, + 590, + 222, + 596 + ], + "type": "text", + "content": "- Middle left block (above the bottom front left)" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 596, + 225, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 596, + 225, + 602 + ], + "spans": [ + { + "bbox": [ + 114, + 596, + 225, + 602 + ], + "type": "text", + "content": "- Middle right block (above the bottom back left)" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 602, + 195, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 602, + 195, + 609 + ], + "spans": [ + { + "bbox": [ + 114, + 602, + 195, + 609 + ], + "type": "text", + "content": "That's 2 blocks on the middle level." 
+ } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 614, + 146, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 614, + 146, + 620 + ], + "spans": [ + { + "bbox": [ + 114, + 614, + 146, + 620 + ], + "type": "text", + "content": "Top Level:" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 620, + 171, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 620, + 171, + 628 + ], + "spans": [ + { + "bbox": [ + 114, + 620, + 171, + 628 + ], + "type": "text", + "content": "At the very top, there is:" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 114, + 628, + 233, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 628, + 233, + 634 + ], + "spans": [ + { + "bbox": [ + 114, + 628, + 233, + 634 + ], + "type": "text", + "content": "- One block positioned above the middle right block" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 634, + 184, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 634, + 184, + 640 + ], + "spans": [ + { + "bbox": [ + 114, + 634, + 184, + 640 + ], + "type": "text", + "content": "That's 1 block on the top level." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 646, + 151, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 646, + 151, + 651 + ], + "spans": [ + { + "bbox": [ + 114, + 646, + 151, + 651 + ], + "type": "text", + "content": "Total Count:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 651, + 211, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 651, + 211, + 658 + ], + "spans": [ + { + "bbox": [ + 114, + 651, + 211, + 658 + ], + "type": "text", + "content": "Adding all levels: " + }, + { + "bbox": [ + 114, + 651, + 211, + 658 + ], + "type": "inline_equation", + "content": "4 + 2 + 1 = 7" + }, + { + "bbox": [ + 114, + 651, + 211, + 658 + ], + "type": "text", + "content": " blocks total" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 658, + 218, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 658, + 218, + 664 + ], + "spans": [ + { + "bbox": [ + 114, + 658, + 218, + 664 + ], + "type": "text", + "content": "Therefore, there are 7 blocks in this 3D tower." 
+ } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 670, + 140, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 670, + 140, + 677 + ], + "spans": [ + { + "bbox": [ + 114, + 670, + 140, + 677 + ], + "type": "text", + "content": "Answer: A" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 113, + 681, + 269, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 681, + 269, + 692 + ], + "spans": [ + { + "bbox": [ + 113, + 681, + 269, + 692 + ], + "type": "text", + "content": "Error Category: Spatial / Orientation Error" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 113, + 692, + 496, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 692, + 496, + 701 + ], + "spans": [ + { + "bbox": [ + 113, + 692, + 496, + 701 + ], + "type": "text", + "content": "Error Reason: The model makes a spatial reasoning error by assuming only 3 blocks at the bottom level," + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 114, + 701, + 462, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 701, + 462, + 712 + ], + "spans": [ + { + "bbox": [ + 114, + 701, + 462, + 712 + ], + "type": "text", + "content": "overlooking a likely 4th hidden cube that would complete the L-shape into a full " + }, + { + "bbox": [ + 114, + 701, + 462, + 712 + ], + "type": "inline_equation", + "content": "2 \\times 2" + }, + { + "bbox": [ + 114, + 701, + 462, + 712 + ], + "type": "text", + "content": " square base." + } + ] + } + ], + "index": 68 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 70 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 269, + 89, + 336, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 89, + 336, + 102 + ], + "spans": [ + { + "bbox": [ + 269, + 89, + 336, + 102 + ], + "type": "text", + "content": "Spatial (Easy)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "cefa652b64bca2cb3ded5a974bd666119b76399473475757d6e80f5058d67cca.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 112, + 112, + 279, + 159 + ], + "blocks": [ + { + "bbox": [ + 112, + 112, + 279, + 159 + ], + "lines": [ + { + "bbox": [ + 112, + 112, + 279, + 159 + ], + "spans": [ + { + "bbox": [ + 112, + 112, + 279, + 159 + ], + "type": "image", + "image_path": "1dca39e1bbec887164256f6110fefe845dd7ef508126048d7df6d8430b767d84.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 723, + 463, + 736 + ], + "lines": [ + { + "bbox": [ + 146, + 723, + 463, + 736 + ], + "spans": [ + { + "bbox": [ + 
146, + 723, + 463, + 736 + ], + "type": "text", + "content": "Figure 39: A sample correct case of Spatial Reasoning (difficulty: Easy)." + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 283, + 108, + 497, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 108, + 497, + 137 + ], + "spans": [ + { + "bbox": [ + 283, + 108, + 497, + 137 + ], + "type": "text", + "content": "Question: Some kids are facing towards front and others are facing towards back. How many kids are holding another kid's hand with their right hand?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 284, + 137, + 389, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 137, + 389, + 147 + ], + "spans": [ + { + "bbox": [ + 284, + 137, + 389, + 147 + ], + "type": "text", + "content": "Options: A: 2. B: 6. C: 5. D: 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 284, + 150, + 353, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 150, + 353, + 161 + ], + "spans": [ + { + "bbox": [ + 284, + 150, + 353, + 161 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 165, + 386, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 165, + 386, + 175 + ], + "spans": [ + { + "bbox": [ + 113, + 165, + 386, + 175 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let's analyze the image and the problem:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 112, + 182, + 495, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 182, + 495, + 201 + ], + "spans": [ + { + "bbox": [ + 112, + 182, + 495, + 201 + ], + "type": "text", + "content": "In the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 209, + 410, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 209, + 410, + 218 + ], + "spans": [ + { + "bbox": [ + 113, + 209, + 410, + 218 + ], + "type": "text", + "content": "The question asks: \"How many kids are holding another kid's hand with their right hand?\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 218, + 474, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 218, + 474, + 228 + ], + "spans": [ + { + "bbox": [ + 113, + 218, + 474, + 228 + ], + "type": "text", + "content": "To solve this, I need to identify for each kid whether they are using their right hand to hold another kid's hand." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 236, + 194, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 236, + 194, + 245 + ], + "spans": [ + { + "bbox": [ + 113, + 236, + 194, + 245 + ], + "type": "text", + "content": "Going from left to right:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 245, + 495, + 316 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 113, + 245, + 495, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 245, + 495, + 262 + ], + "spans": [ + { + "bbox": [ + 113, + 245, + 495, + 262 + ], + "type": "text", + "content": "1. Kid 1 (blonde hair, purple dress): This kid appears to be facing forward. Her right hand would be on her right side from our perspective. 
She's using her right hand to hold the hand of Kid 2." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 113, + 262, + 403, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 262, + 403, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 262, + 403, + 270 + ], + "type": "text", + "content": "2. Kid 2 (gray hair): Appears to be facing forward. Using right hand to hold Kid 3's hand." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 270, + 411, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 270, + 411, + 280 + ], + "spans": [ + { + "bbox": [ + 113, + 270, + 411, + 280 + ], + "type": "text", + "content": "3. Kid 3 (green outfit): Appears to be facing forward. Using right hand to hold Kid 4's hand." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 280, + 495, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 280, + 495, + 297 + ], + "spans": [ + { + "bbox": [ + 113, + 280, + 495, + 297 + ], + "type": "text", + "content": "4. Kid 4 (black outfit): Appears to be facing backward (away from us). From Kid 4's perspective, their right hand would be on our left. So Kid 4 is using their right hand to hold Kid 3's hand." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 297, + 450, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 297, + 450, + 306 + ], + "spans": [ + { + "bbox": [ + 113, + 297, + 450, + 306 + ], + "type": "text", + "content": "5. Kid 5 (pink hair, yellow skirt): Appears to be facing backward. Using right hand to hold Kid 4's hand." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 306, + 485, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 306, + 485, + 316 + ], + "spans": [ + { + "bbox": [ + 113, + 306, + 485, + 316 + ], + "type": "text", + "content": "6. Kid 6 (light green/mint hair): Appears to be facing forward. Using right hand to not hold anyone (end of chain)." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 323, + 373, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 323, + 373, + 333 + ], + "spans": [ + { + "bbox": [ + 113, + 323, + 373, + 333 + ], + "type": "text", + "content": "So, counting the kids who are using their right hand to hold another kid's hand:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 333, + 148, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 333, + 148, + 340 + ], + "spans": [ + { + "bbox": [ + 114, + 333, + 148, + 340 + ], + "type": "text", + "content": "Kid 1: Yes" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 342, + 148, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 342, + 148, + 350 + ], + "spans": [ + { + "bbox": [ + 114, + 342, + 148, + 350 + ], + "type": "text", + "content": "Kid 2: Yes" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 350, + 148, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 350, + 148, + 358 + ], + "spans": [ + { + "bbox": [ + 114, + 350, + 148, + 358 + ], + "type": "text", + "content": "Kid 3: Yes" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 358, + 148, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 358, + 148, + 367 + ], + "spans": [ + { + "bbox": [ + 114, + 358, + 148, + 367 + ], + "type": "text", + "content": "Kid 4: Yes" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 368, + 148, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 368, + 148, + 376 + ], + "spans": [ + { + "bbox": [ + 114, + 368, + 148, + 376 + ], + "type": "text", + "content": "Kid 5: Yes" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 376, + 193, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 376, + 193, + 386 + ], + "spans": [ + { + "bbox": [ + 114, + 376, + 193, + 386 + ], + "type": "text", + "content": "Kid 6: No (end of chain)" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 394, + 342, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 394, + 342, + 404 + ], + "spans": [ + { + "bbox": [ + 113, + 394, + 342, + 404 + ], + "type": "text", + "content": "That gives us 5 kids using their right hand to hold another kid's hand." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 411, + 495, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 411, + 495, + 430 + ], + "spans": [ + { + "bbox": [ + 113, + 411, + 495, + 430 + ], + "type": "text", + "content": "However, I need to double-check my reasoning about which direction each kid is facing, as this affects which hand is their right hand." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 438, + 194, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 438, + 194, + 447 + ], + "spans": [ + { + "bbox": [ + 113, + 438, + 194, + 447 + ], + "type": "text", + "content": "Looking more carefully:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 447, + 441, + 500 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 113, + 447, + 343, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 447, + 343, + 456 + ], + "spans": [ + { + "bbox": [ + 113, + 447, + 343, + 456 + ], + "type": "text", + "content": "- Kid 1 (blonde): Facing forward, right hand holding Kid 2's hand. Yes." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 456, + 350, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 456, + 350, + 465 + ], + "spans": [ + { + "bbox": [ + 114, + 456, + 350, + 465 + ], + "type": "text", + "content": "- Kid 2 (gray hair): Facing forward, right hand holding Kid 3's hand. Yes." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 465, + 358, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 465, + 358, + 474 + ], + "spans": [ + { + "bbox": [ + 114, + 465, + 358, + 474 + ], + "type": "text", + "content": "- Kid 3 (green outfit): Facing forward, right hand holding Kid 4's hand. Yes." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 474, + 441, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 474, + 441, + 483 + ], + "spans": [ + { + "bbox": [ + 114, + 474, + 441, + 483 + ], + "type": "text", + "content": "- Kid 4 (black outfit): Facing backward, right hand (from their perspective) holding Kid 3's hand. Yes." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 483, + 356, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 483, + 356, + 491 + ], + "spans": [ + { + "bbox": [ + 114, + 483, + 356, + 491 + ], + "type": "text", + "content": "- Kid 5 (pink hair): Facing backward, right hand holding Kid 4's hand. Yes." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 491, + 369, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 491, + 369, + 500 + ], + "spans": [ + { + "bbox": [ + 114, + 491, + 369, + 500 + ], + "type": "text", + "content": "- Kid 6 (mint hair): Facing forward, right hand not holding anyone's hand. No." + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 508, + 337, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 508, + 337, + 518 + ], + "spans": [ + { + "bbox": [ + 113, + 508, + 337, + 518 + ], + "type": "text", + "content": "So, there are 5 kids holding another kid's hand with their right hand." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 526, + 205, + 535 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 526, + 205, + 535 + ], + "spans": [ + { + "bbox": [ + 113, + 526, + 205, + 535 + ], + "type": "text", + "content": "The answer should be (C) 5." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 536, + 167, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 536, + 167, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 536, + 167, + 544 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 544, + 495, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 544, + 495, + 562 + ], + "spans": [ + { + "bbox": [ + 113, + 544, + 495, + 562 + ], + "type": "text", + "content": "I need to determine which kids are holding another kid's hand with their right hand by analyzing the image and understanding each child's orientation." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 113, + 570, + 495, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 570, + 495, + 589 + ], + "spans": [ + { + "bbox": [ + 113, + 570, + 495, + 589 + ], + "type": "text", + "content": "Looking at the image, I can see 6 kids standing in a chain holding hands with each other. Some are facing forward and some are facing backward." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 597, + 257, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 597, + 257, + 606 + ], + "spans": [ + { + "bbox": [ + 113, + 597, + 257, + 606 + ], + "type": "text", + "content": "Let me analyze each child from left to right:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 614, + 495, + 685 + ], + "type": "list", + "angle": 0, + "index": 48, + "blocks": [ + { + "bbox": [ + 113, + 614, + 494, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 614, + 494, + 623 + ], + "spans": [ + { + "bbox": [ + 113, + 614, + 494, + 623 + ], + "type": "text", + "content": "1. First child (blonde girl in purple dress): Facing forward. She's using her right hand to hold the second child's hand." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 623, + 411, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 623, + 411, + 632 + ], + "spans": [ + { + "bbox": [ + 114, + 623, + 411, + 632 + ], + "type": "text", + "content": "2. Second child (gray hair): Facing forward. Using right hand to hold the third child's hand." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 632, + 417, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 632, + 417, + 640 + ], + "spans": [ + { + "bbox": [ + 114, + 632, + 417, + 640 + ], + "type": "text", + "content": "3. Third child (green outfit): Facing forward. Using right hand to hold the fourth child's hand." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 641, + 495, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 641, + 495, + 658 + ], + "spans": [ + { + "bbox": [ + 114, + 641, + 495, + 658 + ], + "type": "text", + "content": "4. Fourth child (black outfit): Facing backward. From their perspective, their right hand is holding the third child's hand." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 658, + 468, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 658, + 468, + 666 + ], + "spans": [ + { + "bbox": [ + 114, + 658, + 468, + 666 + ], + "type": "text", + "content": "5. Fifth child (pink hair in yellow skirt): Facing backward. Their right hand is holding the fourth child's hand." 
+ } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 667, + 495, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 667, + 495, + 685 + ], + "spans": [ + { + "bbox": [ + 114, + 667, + 495, + 685 + ], + "type": "text", + "content": "6. Sixth child (light-colored hair): Facing forward. This child is at the end of the chain on the right, so their right hand is not holding anyone's hand." + } + ] + } + ], + "index": 47 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 693, + 495, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 693, + 495, + 711 + ], + "spans": [ + { + "bbox": [ + 113, + 693, + 495, + 711 + ], + "type": "text", + "content": "Counting the kids who are using their right hand to hold another kid's hand: 1st, 2nd, 3rd, 4th, and 5th kids = 5 children total." + } + ] + } + ], + "index": 49 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 51 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 260, + 88, + 345, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 88, + 345, + 102 + ], + "spans": [ + { + "bbox": [ + 260, + 88, + 345, + 102 + ], + "type": "text", + "content": "Spatial (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 101 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 101 + ], + "type": "image", + "image_path": "14b366f4185c02e93ebe4219a693c366bf81475a9410c98c0e1a41b887c3316c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 169, + 110, + 191, + 133 + ], + "blocks": [ + { + "bbox": [ + 169, + 110, + 191, + 133 + ], + "lines": [ + { + "bbox": [ + 169, + 110, + 191, + 133 + ], + "spans": [ + { + "bbox": [ + 169, + 110, + 191, + 133 + ], + "type": "image", + "image_path": "8e7a8efbb9442d3da6e11d1452e1c3d6dfed84fbbc0dca5848c6be76f2f510cd.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 117, + 142, + 242, + 182 + ], + "blocks": [ + { + "bbox": [ + 117, + 142, + 242, + 182 + ], + "lines": [ + { + "bbox": [ + 117, + 142, + 242, + 182 + ], + "spans": [ + { + "bbox": [ + 117, + 142, + 242, + 182 + ], + "type": "image", + "image_path": "1639cb2aa2bb59d6aaebd4716fe3ca69f37c2ae86b1df24cdb81e1da99dfd062.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 142, + 723, + 466, + 736 + ], + "lines": [ + { + "bbox": [ + 142, + 723, + 466, + 736 + ], + "spans": [ + { + "bbox": [ + 142, + 723, + 466, + 736 + ], + "type": "text", + "content": "Figure 40: A sample error case of Spatial Reasoning (difficulty: Medium)." 
+ } + ] + } + ], + "index": 73, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 129, + 187, + 136, + 194 + ], + "blocks": [ + { + "bbox": [ + 129, + 187, + 136, + 194 + ], + "lines": [ + { + "bbox": [ + 129, + 187, + 136, + 194 + ], + "spans": [ + { + "bbox": [ + 129, + 187, + 136, + 194 + ], + "type": "image", + "image_path": "9b686a95436f9a02fa717ed03cad01e1e16f1c7cc446ebdb14615dd0b806dd7a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 161, + 188, + 167, + 194 + ], + "blocks": [ + { + "bbox": [ + 161, + 188, + 167, + 194 + ], + "lines": [ + { + "bbox": [ + 161, + 188, + 167, + 194 + ], + "spans": [ + { + "bbox": [ + 161, + 188, + 167, + 194 + ], + "type": "image", + "image_path": "adf5076b567b494c09c346d232e7291ede55ef3f81babce364269d00e4cdb522.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 192, + 188, + 198, + 194 + ], + "blocks": [ + { + "bbox": [ + 192, + 188, + 198, + 194 + ], + "lines": [ + { + "bbox": [ + 192, + 188, + 198, + 194 + ], + "spans": [ + { + "bbox": [ + 192, + 188, + 198, + 194 + ], + "type": "image", + "image_path": "bea696fa24270a2378f3ea6791555d4d9e3504dd156e7e882d77f1f7fd86dcd8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 225, + 187, + 231, + 194 + ], + "blocks": [ + { + "bbox": [ + 225, + 187, + 231, + 194 + ], + "lines": [ + { + "bbox": [ + 225, + 187, + 231, + 194 + ], + "spans": [ + { + "bbox": [ + 225, + 187, + 231, + 194 + ], + "type": "image", + "image_path": "fd296ebae832def4298738b303e6e0ecdc616dbe78e9eb939acb6ded360c9ebd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 249, + 136, + 483, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 136, + 483, + 146 + ], + "spans": [ + { + "bbox": [ + 249, + 136, + 483, + 146 + ], + "type": "text", + "content": "Question: Which cube can be made based on the unfolded cube?" 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 250, + 159, + 318, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 159, + 318, + 169 + ], + "spans": [ + { + "bbox": [ + 250, + 159, + 318, + 169 + ], + "type": "text", + "content": "Ground Truth: (A)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 198, + 211, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 198, + 211, + 205 + ], + "spans": [ + { + "bbox": [ + 114, + 198, + 211, + 205 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: " + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 205, + 319, + 243 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 114, + 205, + 279, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 205, + 279, + 211 + ], + "spans": [ + { + "bbox": [ + 114, + 205, + 279, + 211 + ], + "type": "text", + "content": "Let me analyze the folded cube first to understand what I'm looking for:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 211, + 319, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 211, + 319, + 218 + ], + "spans": [ + { + "bbox": [ + 114, + 211, + 319, + 218 + ], + "type": "text", + "content": "- The cube appears to be mostly gold/yellow with black triangular patterns on some faces" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 218, + 181, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 218, + 181, + 224 + ], + "spans": [ + { + "bbox": [ + 115, + 218, + 181, + 224 + ], + "type": "text", + "content": "- I can see 3 faces of the cube:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 224, + 261, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 224, + 261, + 231 + ], + "spans": [ + { + "bbox": [ + 115, + 224, + 261, + 231 + ], + "type": "text", + "content": "- Front face: Has a black triangular pattern pointing downward" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 231, + 257, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 231, + 257, + 237 + ], + "spans": [ + { + "bbox": [ + 115, + 231, + 257, + 237 + ], + "type": "text", + "content": "- Top face: Has a black triangular pattern pointing to the right" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 237, + 258, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 237, + 258, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 237, + 258, + 243 + ], + "type": "text", + "content": "-Right face: Has a black triangular pattern pointing to the left" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 247, + 496, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 247, + 496, + 262 + ], + "spans": [ + { + "bbox": [ + 113, + 247, + 496, + 262 + ], + "type": "text", + "content": "Now I need to examine each unfolded pattern to see which one would create these patterns when folded into a cube. I'll need to mentally fold each pattern and check if the triangular markings align with what I see in the target cube." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 266, + 149, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 266, + 149, + 273 + ], + "spans": [ + { + "bbox": [ + 114, + 266, + 149, + 273 + ], + "type": "text", + "content": "For option (A):" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 273, + 181, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 273, + 181, + 278 + ], + "spans": [ + { + "bbox": [ + 114, + 273, + 181, + 278 + ], + "type": "text", + "content": "The unfolded pattern shows:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 279, + 468, + 293 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 115, + 279, + 230, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 279, + 230, + 285 + ], + "spans": [ + { + "bbox": [ + 115, + 279, + 230, + 285 + ], + "type": "text", + "content": "- A T-shape with triangular patterns on some faces" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 285, + 468, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 285, + 468, + 293 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 468, + 293 + ], + "type": "text", + "content": "- When I mentally fold this, the triangular patterns don't seem to align with the target cube. The arrangement doesn't match what we see in the folded cube." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 297, + 148, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 297, + 148, + 304 + ], + "spans": [ + { + "bbox": [ + 114, + 297, + 148, + 304 + ], + "type": "text", + "content": "For option (B):" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 304, + 385, + 323 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 114, + 304, + 174, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 304, + 174, + 310 + ], + "spans": [ + { + "bbox": [ + 114, + 304, + 174, + 310 + ], + "type": "text", + "content": "The unfolded pattern has:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 311, + 216, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 311, + 216, + 316 + ], + "spans": [ + { + "bbox": [ + 115, + 311, + 216, + 316 + ], + "type": "text", + "content": "- A cross-like shape with triangular patterns" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 316, + 385, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 316, + 385, + 323 + ], + "spans": [ + { + "bbox": [ + 114, + 316, + 385, + 323 + ], + "type": "text", + "content": "- When folded, this arrangement might create the patterns we see in the target cube. I need to check this more carefully." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 328, + 148, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 328, + 148, + 335 + ], + "spans": [ + { + "bbox": [ + 114, + 328, + 148, + 335 + ], + "type": "text", + "content": "For option (C):" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 335, + 282, + 354 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 114, + 335, + 175, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 335, + 175, + 341 + ], + "spans": [ + { + "bbox": [ + 114, + 335, + 175, + 341 + ], + "type": "text", + "content": "This unfolded pattern has:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 341, + 254, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 341, + 254, + 347 + ], + "spans": [ + { + "bbox": [ + 114, + 341, + 254, + 347 + ], + "type": "text", + "content": "- An L-shape with additional squares and triangular patterns" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 347, + 282, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 347, + 282, + 354 + ], + "spans": [ + { + "bbox": [ + 114, + 347, + 282, + 354 + ], + "type": "text", + "content": "- When mentally folded, the patterns don't seem to match the target cube." + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 359, + 148, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 359, + 148, + 365 + ], + "spans": [ + { + "bbox": [ + 114, + 359, + 148, + 365 + ], + "type": "text", + "content": "For option (D):" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 365, + 285, + 384 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 114, + 365, + 175, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 365, + 175, + 372 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 175, + 372 + ], + "type": "text", + "content": "This unfolded pattern has:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 372, + 270, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 372, + 270, + 378 + ], + "spans": [ + { + "bbox": [ + 114, + 372, + 270, + 378 + ], + "type": "text", + "content": "- A cross-like shape with triangular patterns in different orientations" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 378, + 285, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 378, + 285, + 384 + ], + "spans": [ + { + "bbox": [ + 114, + 378, + 285, + 384 + ], + "type": "text", + "content": "- When mentally folded, I need to check if this could create the target cube." + } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 389, + 427, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 389, + 427, + 396 + ], + "spans": [ + { + "bbox": [ + 113, + 389, + 427, + 396 + ], + "type": "text", + "content": "Let me think about this more methodically. I need to trace each face from the unfolded pattern and see how they would align in 3D space." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 402, + 199, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 402, + 199, + 408 + ], + "spans": [ + { + "bbox": [ + 114, + 402, + 199, + 408 + ], + "type": "text", + "content": "Looking at option (B) more carefully:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 409, + 446, + 439 + ], + "type": "list", + "angle": 0, + "index": 47, + "blocks": [ + { + "bbox": [ + 114, + 409, + 340, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 409, + 340, + 415 + ], + "spans": [ + { + "bbox": [ + 114, + 409, + 340, + 415 + ], + "type": "text", + "content": "If I consider the cross-shape as the standard cube net, with the center becoming the front face, then:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 415, + 363, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 415, + 363, + 421 + ], + "spans": [ + { + "bbox": [ + 114, + 415, + 363, + 421 + ], + "type": "text", + "content": "- The center face has a triangular pattern pointing downward, which matches the front face of the target cube" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 421, + 446, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 421, + 446, + 427 + ], + "spans": [ + { + "bbox": [ + 114, + 421, + 446, + 427 + ], + "type": "text", + "content": "- The face to the right of center would become the right face, and it has a triangular pattern that would point left when folded, matching the target" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 427, + 437, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 427, + 437, + 434 + ], + "spans": [ + { + "bbox": [ + 114, + 427, + 437, + 434 + ], + "type": "text", + "content": "- The face above the center would become the top face, and it has a triangular pattern that would point right when folded, matching the target" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 434, + 256, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 434, + 256, + 439 + ], + "spans": [ + { + "bbox": [ + 114, + 434, + 256, + 439 + ], + "type": "text", + "content": "This seems to be a match for the folded cube shown at the top." + } + ] + } + ], + "index": 46 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 445, + 496, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 445, + 496, + 458 + ], + "spans": [ + { + "bbox": [ + 113, + 445, + 496, + 458 + ], + "type": "text", + "content": "Let me double-check option (D): The arrangement of the triangular patterns doesn't seem to match what we see in the folded cube, particularly the orientation of the triangles." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 464, + 203, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 464, + 203, + 470 + ], + "spans": [ + { + "bbox": [ + 114, + 464, + 203, + 470 + ], + "type": "text", + "content": "I think option (B) is the correct answer." 
+ } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 471, + 151, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 471, + 151, + 476 + ], + "spans": [ + { + "bbox": [ + 114, + 471, + 151, + 476 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 476, + 256, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 476, + 256, + 483 + ], + "spans": [ + { + "bbox": [ + 114, + 476, + 256, + 483 + ], + "type": "text", + "content": "Analyzing Which Unfolded Pattern Creates the Target Cube" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 114, + 488, + 413, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 488, + 413, + 495 + ], + "spans": [ + { + "bbox": [ + 114, + 488, + 413, + 495 + ], + "type": "text", + "content": "I need to determine which of the unfolded cube patterns (a, b, c, d) would create the folded cube shown at the top when assembled." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 500, + 185, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 500, + 185, + 507 + ], + "spans": [ + { + "bbox": [ + 114, + 500, + 185, + 507 + ], + "type": "text", + "content": "Examining the Target Cube" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 508, + 258, + 533 + ], + "type": "list", + "angle": 0, + "index": 58, + "blocks": [ + { + "bbox": [ + 114, + 508, + 230, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 508, + 230, + 514 + ], + "spans": [ + { + "bbox": [ + 114, + 508, + 230, + 514 + ], + "type": "text", + "content": "First, I'll identify what's visible on the folded cube:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 514, + 258, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 514, + 258, + 520 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 258, + 520 + ], + "type": "text", + "content": "- Front face: Has a black triangular pattern pointing downward" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 521, + 256, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 521, + 256, + 527 + ], + "spans": [ + { + "bbox": [ + 114, + 521, + 256, + 527 + ], + "type": "text", + "content": "- Top face: Has a black triangular pattern pointing to the right" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 527, + 255, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 527, + 255, + 533 + ], + "spans": [ + { + "bbox": [ + 114, + 527, + 255, + 533 + ], + "type": "text", + "content": "- Right face: Has a black triangular pattern pointing to the left" + } + ] + } + ], + "index": 57 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 537, + 200, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 537, + 200, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 537, + 200, + 544 + ], + "type": "text", + "content": "Analyzing Each Unfolded Pattern" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 113, + 550, + 496, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 550, + 496, + 563 + ], + "spans": [ + { + "bbox": [ + 113, + 550, + 496, + 563 + ], + "type": "text", + "content": "Option (a): When folded, the triangular patterns wouldn't match the configuration in the target cube. The orientations and positions of the triangles don't align correctly." 
+ } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 568, + 219, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 568, + 219, + 574 + ], + "spans": [ + { + "bbox": [ + 114, + 568, + 219, + 574 + ], + "type": "text", + "content": "Option (b): If I mentally fold this pattern:" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 114, + 575, + 326, + 600 + ], + "type": "list", + "angle": 0, + "index": 66, + "blocks": [ + { + "bbox": [ + 114, + 575, + 289, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 575, + 289, + 581 + ], + "spans": [ + { + "bbox": [ + 114, + 575, + 289, + 581 + ], + "type": "text", + "content": "- The central face (with downward-pointing triangle) would be the front face" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 114, + 582, + 298, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 582, + 298, + 587 + ], + "spans": [ + { + "bbox": [ + 114, + 582, + 298, + 587 + ], + "type": "text", + "content": "- The face above it (with rightward-pointing triangle) would become the top face" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 114, + 588, + 326, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 588, + 326, + 594 + ], + "spans": [ + { + "bbox": [ + 114, + 588, + 326, + 594 + ], + "type": "text", + "content": "- The face to the right of center (with leftward-pointing triangle) would become the right face" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 114, + 594, + 244, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 594, + 244, + 600 + ], + "spans": [ + { + "bbox": [ + 114, + 594, + 244, + 600 + ], + "type": "text", + "content": "This perfectly matches the pattern seen in the target cube." + } + ] + } + ], + "index": 65 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 605, + 496, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 605, + 496, + 618 + ], + "spans": [ + { + "bbox": [ + 113, + 605, + 496, + 618 + ], + "type": "text", + "content": "Option (c): The arrangement of faces and triangular patterns in this unfolded net wouldn't create the target cube. The orientations of the triangles don't match when folded." + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 114, + 624, + 395, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 624, + 395, + 631 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 395, + 631 + ], + "type": "text", + "content": "Option (d): While this has triangular patterns, when folded the orientations don't match what we see in the target cube." + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 114, + 635, + 334, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 635, + 334, + 643 + ], + "spans": [ + { + "bbox": [ + 114, + 635, + 334, + 643 + ], + "type": "text", + "content": "Therefore, option (b) is the only unfolded pattern that would create the target cube when folded." 
+ } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 114, + 648, + 139, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 648, + 139, + 654 + ], + "spans": [ + { + "bbox": [ + 114, + 648, + 139, + 654 + ], + "type": "text", + "content": "Answer: B" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 114, + 659, + 269, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 659, + 269, + 669 + ], + "spans": [ + { + "bbox": [ + 114, + 659, + 269, + 669 + ], + "type": "text", + "content": "Error Category: Spatial / Orientation Error" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 113, + 670, + 496, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 670, + 496, + 707 + ], + "spans": [ + { + "bbox": [ + 113, + 670, + 496, + 707 + ], + "type": "text", + "content": "Error Reason: The model incorrectly visualizes how the triangular patterns on the unfolded net align when folded into a cube. It selects option B, mistakenly believing the triangle orientations will match the folded view, but fails to correctly account for how the triangles rotate and face different directions when assembled." + } + ] + } + ], + "index": 72 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 74 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "bbox": [ + 260, + 89, + 345, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 89, + 345, + 102 + ], + "spans": [ + { + "bbox": [ + 260, + 89, + 345, + 102 + ], + "type": "text", + "content": "Spatial (Medium)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 226, + 107, + 254, + 148 + ], + "blocks": [ + { + "bbox": [ + 226, + 107, + 254, + 148 + ], + "lines": [ + { + "bbox": [ + 226, + 107, + 254, + 148 + ], + "spans": [ + { + "bbox": [ + 226, + 107, + 254, + 148 + ], + "type": "image", + "image_path": "5173ddc5146d9cfeb2d74a7a9f9bb884fa2ce8df452523de82ced9c0974ae68a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "lines": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "spans": [ + { + "bbox": [ + 138, + 723, + 471, + 736 + ], + "type": "text", + "content": "Figure 41: A sample correct case of Spatial Reasoning (difficulty: Medium)." 
+ } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 263, + 107, + 291, + 148 + ], + "blocks": [ + { + "bbox": [ + 263, + 107, + 291, + 148 + ], + "lines": [ + { + "bbox": [ + 263, + 107, + 291, + 148 + ], + "spans": [ + { + "bbox": [ + 263, + 107, + 291, + 148 + ], + "type": "image", + "image_path": "65301e5a16dcfa61b2c31b244def2e72eb53721f2d5f98a279f1ff866ed35b6e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 302, + 107, + 330, + 148 + ], + "blocks": [ + { + "bbox": [ + 302, + 107, + 330, + 148 + ], + "lines": [ + { + "bbox": [ + 302, + 107, + 330, + 148 + ], + "spans": [ + { + "bbox": [ + 302, + 107, + 330, + 148 + ], + "type": "image", + "image_path": "c8fb5f58b9700cf16ecc6cb3575f9894069c4c885cbea362069dec97779eacbd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 339, + 107, + 356, + 148 + ], + "blocks": [ + { + "bbox": [ + 339, + 107, + 356, + 148 + ], + "lines": [ + { + "bbox": [ + 339, + 107, + 356, + 148 + ], + "spans": [ + { + "bbox": [ + 339, + 107, + 356, + 148 + ], + "type": "image", + "image_path": "10bcbe79e5c7c5d962a5e38863c039aae6fad5ad4603da5cacbc3e1b9c18cc88.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 366, + 107, + 379, + 148 + ], + "blocks": [ + { + "bbox": [ + 366, + 107, + 379, + 148 + ], + "lines": [ + { + "bbox": [ + 366, + 107, + 379, + 148 + ], + "spans": [ + { + "bbox": [ + 366, + 107, + 379, + 148 + ], + "type": "image", + "image_path": "2bbfec971459982369375ff4238e17715565acb6e3858aa51c607ea4534d78fc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 153, + 496, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 153, + 496, + 192 + ], + "spans": [ + { + "bbox": [ + 112, + 153, + 496, + 192 + ], + "type": "text", + "content": "Question: Paulo took a rectangular sheet of paper, yellow on one side and green on the other side and, with several folds shown in the dotted lines in the figure below, made a little paper plane. To give the airplane a charm, Paulo made a circular hole, marked on the last figure. After playing a lot with the plane, Paulo unfolded the sheet and realized that there were several holes in it. How many holes did he count?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 192, + 219, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 192, + 219, + 201 + ], + "spans": [ + { + "bbox": [ + 113, + 192, + 219, + 201 + ], + "type": "text", + "content": "Options: A: 6. B: 7. C: 8. D: 9" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 204, + 182, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 204, + 182, + 214 + ], + "spans": [ + { + "bbox": [ + 113, + 204, + 182, + 214 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 217, + 306, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 217, + 306, + 225 + ], + "spans": [ + { + "bbox": [ + 113, + 217, + 306, + 225 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: Let me analyze this problem step by step." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 229, + 495, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 229, + 495, + 243 + ], + "spans": [ + { + "bbox": [ + 113, + 229, + 495, + 243 + ], + "type": "text", + "content": "We're looking at a sheet of paper that is folded in several steps to make a paper plane, and then a circular hole is punched through the folded paper. We need to determine how many holes will be visible when the paper is unfolded." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 247, + 175, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 247, + 175, + 255 + ], + "spans": [ + { + "bbox": [ + 113, + 247, + 175, + 255 + ], + "type": "text", + "content": "From the image, I can see:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 255, + 297, + 274 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 114, + 255, + 292, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 255, + 292, + 262 + ], + "spans": [ + { + "bbox": [ + 114, + 255, + 292, + 262 + ], + "type": "text", + "content": "1. A rectangular yellow sheet of paper (yellow on one side, green on the other)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 262, + 242, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 262, + 242, + 267 + ], + "spans": [ + { + "bbox": [ + 114, + 262, + 242, + 267 + ], + "type": "text", + "content": "2. The paper is folded along dotted lines in several steps" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 267, + 297, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 267, + 297, + 274 + ], + "spans": [ + { + "bbox": [ + 114, + 267, + 297, + 274 + ], + "type": "text", + "content": "3. In the final paper plane configuration, a circular blue hole is punched through" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 278, + 495, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 278, + 495, + 293 + ], + "spans": [ + { + "bbox": [ + 113, + 278, + 495, + 293 + ], + "type": "text", + "content": "When a hole is punched through multiple layers of folded paper, it creates holes in each layer. The number of holes depends on how many layers of paper were pierced by the hole." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 297, + 186, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 297, + 186, + 305 + ], + "spans": [ + { + "bbox": [ + 113, + 297, + 186, + 305 + ], + "type": "text", + "content": "Looking at the folding process:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 310, + 334, + 335 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 114, + 310, + 334, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 310, + 334, + 317 + ], + "spans": [ + { + "bbox": [ + 114, + 310, + 334, + 317 + ], + "type": "text", + "content": "- Second image: Top portion folded to create a triangular shape (green side now showing on top)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 317, + 246, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 317, + 246, + 323 + ], + "spans": [ + { + "bbox": [ + 114, + 317, + 246, + 323 + ], + "type": "text", + "content": "- Third image: Side portions folded (more green showing)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 323, + 187, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 323, + 187, + 329 + ], + "spans": [ + { + "bbox": [ + 115, + 323, + 187, + 329 + ], + "type": "text", + "content": "- Fourth image: Further folding" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 329, + 242, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 329, + 242, + 335 + ], + "spans": [ + { + "bbox": [ + 114, + 329, + 242, + 335 + ], + "type": "text", + "content": "- Fifth image: Final paper plane with a blue circular hole" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 340, + 470, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 340, + 470, + 348 + ], + "spans": [ + { + "bbox": [ + 113, + 340, + 470, + 348 + ], + "type": "text", + "content": "To determine the number of holes, I need to figure out how many layers of paper were at the location where the hole was punched in the final configuration." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 352, + 348, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 352, + 348, + 360 + ], + "spans": [ + { + "bbox": [ + 113, + 352, + 348, + 360 + ], + "type": "text", + "content": "It's a bit difficult to precisely determine from just these diagrams, but I can make a reasonable analysis:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 113, + 365, + 496, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 365, + 496, + 379 + ], + "spans": [ + { + "bbox": [ + 113, + 365, + 496, + 379 + ], + "type": "text", + "content": "In a typical paper airplane, the center fold creates 2 layers, and then additional folds along the sides can create more layers. The hole appears to be punched near the tip of one wing." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 384, + 495, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 384, + 495, + 398 + ], + "spans": [ + { + "bbox": [ + 113, + 384, + 495, + 398 + ], + "type": "text", + "content": "When we fold paper, the hole punched through multiple layers will create a hole in each layer. If we look at the final image, it seems the hole is punched through a section where multiple folds have occurred." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 401, + 448, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 401, + 448, + 410 + ], + "spans": [ + { + "bbox": [ + 113, + 401, + 448, + 410 + ], + "type": "text", + "content": "Based on the folding pattern shown, the paper appears to have been folded in a way that would result in multiple layers at the location of the hole." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 414, + 495, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 414, + 495, + 429 + ], + "spans": [ + { + "bbox": [ + 113, + 414, + 495, + 429 + ], + "type": "text", + "content": "Without seeing exactly how many layers overlap at the punch point, it's hard to give a definitive answer. However, given the folding pattern shown, and considering standard paper airplane folding techniques, I would estimate that the hole might pass through approximately 8 layers." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 432, + 389, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 432, + 389, + 441 + ], + "spans": [ + { + "bbox": [ + 113, + 432, + 389, + 441 + ], + "type": "text", + "content": "However, since this is a multiple-choice question with specific options, I should choose among (A) 6, (B) 7, (C) 8, or (D) 9." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 445, + 496, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 445, + 496, + 459 + ], + "spans": [ + { + "bbox": [ + 113, + 445, + 496, + 459 + ], + "type": "text", + "content": "Given the complexity of the folds and the location of the hole in the final plane, and looking at the options, I believe option (C) 8 is the most reasonable answer. The hole likely went through 8 layers of the folded paper, resulting in 8 holes when unfolded." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 113, + 463, + 474, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 463, + 474, + 471 + ], + "spans": [ + { + "bbox": [ + 113, + 463, + 474, + 471 + ], + "type": "text", + "content": "Actual, let me think about this more carefully. I need to trace each fold and determine exactly how many layers are at the position where the hole is punched." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 476, + 318, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 476, + 318, + 483 + ], + "spans": [ + { + "bbox": [ + 113, + 476, + 318, + 483 + ], + "type": "text", + "content": "The hole appears to be near one tip of the final paper plane. Based on the folding pattern:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 483, + 247, + 502 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 114, + 483, + 247, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 483, + 247, + 490 + ], + "spans": [ + { + "bbox": [ + 114, + 483, + 247, + 490 + ], + "type": "text", + "content": "1. The initial fold creates a triangle on top (1 fold, 2 layers)" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 490, + 196, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 490, + 196, + 495 + ], + "spans": [ + { + "bbox": [ + 114, + 490, + 196, + 495 + ], + "type": "text", + "content": "2. 
Then side folds create more layers" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 495, + 216, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 495, + 216, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 495, + 216, + 502 + ], + "type": "text", + "content": "3. The final folds create the completed plane" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 506, + 446, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 506, + 446, + 514 + ], + "spans": [ + { + "bbox": [ + 113, + 506, + 446, + 514 + ], + "type": "text", + "content": "Typical paper airplanes can have different numbers of layers at different points. The tip might have 8 layers if several folds pass through that area." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 519, + 341, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 519, + 341, + 526 + ], + "spans": [ + { + "bbox": [ + 113, + 519, + 341, + 526 + ], + "type": "text", + "content": "Given the options and the standard folding patterns for paper planes, I'll go with option (C) 8 holes." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 526, + 152, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 526, + 152, + 532 + ], + "spans": [ + { + "bbox": [ + 114, + 526, + 152, + 532 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 532, + 208, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 532, + 208, + 539 + ], + "spans": [ + { + "bbox": [ + 114, + 532, + 208, + 539 + ], + "type": "text", + "content": "Counting Holes in the Unfolded Paper" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 544, + 422, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 544, + 422, + 552 + ], + "spans": [ + { + "bbox": [ + 113, + 544, + 422, + 552 + ], + "type": "text", + "content": "I need to determine how many holes appear when Paulo unfolds his paper plane after making a single circular hole in the folded plane." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 556, + 192, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 556, + 192, + 563 + ], + "spans": [ + { + "bbox": [ + 114, + 556, + 192, + 563 + ], + "type": "text", + "content": "Analyzing the Folding Process" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 568, + 315, + 594 + ], + "type": "list", + "angle": 0, + "index": 48, + "blocks": [ + { + "bbox": [ + 114, + 568, + 187, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 568, + 187, + 574 + ], + "spans": [ + { + "bbox": [ + 114, + 568, + 187, + 574 + ], + "type": "text", + "content": "Looking at the image sequence:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 574, + 313, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 574, + 313, + 582 + ], + "spans": [ + { + "bbox": [ + 114, + 574, + 313, + 582 + ], + "type": "text", + "content": "1. We start with a flat rectangular sheet that's yellow on one side and green on the other" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 582, + 277, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 582, + 277, + 587 + ], + "spans": [ + { + "bbox": [ + 114, + 582, + 277, + 587 + ], + "type": "text", + "content": "2. 
The paper goes through several folding steps (shown by dotted lines)" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 114, + 587, + 315, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 587, + 315, + 594 + ], + "spans": [ + { + "bbox": [ + 114, + 587, + 315, + 594 + ], + "type": "text", + "content": "3. In the final paper plane configuration, a single circular hole (blue) is punched through" + } + ] + } + ], + "index": 47 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 599, + 188, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 599, + 188, + 607 + ], + "spans": [ + { + "bbox": [ + 113, + 599, + 188, + 607 + ], + "type": "text", + "content": "Why Multiple Holes Appear" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 113, + 611, + 495, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 611, + 495, + 625 + ], + "spans": [ + { + "bbox": [ + 113, + 611, + 495, + 625 + ], + "type": "text", + "content": "When a hole is punched through multiple layers of folded paper, it creates a hole in each layer. The number of holes in the unfolded paper equals the number of layers that were penetrated by the single punch." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 630, + 170, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 630, + 170, + 638 + ], + "spans": [ + { + "bbox": [ + 113, + 630, + 170, + 638 + ], + "type": "text", + "content": "## Counting the Layers" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 113, + 643, + 273, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 643, + 273, + 650 + ], + "spans": [ + { + "bbox": [ + 113, + 643, + 273, + 650 + ], + "type": "text", + "content": "Following the folding process carefully and looking at the final plane:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 114, + 650, + 297, + 668 + ], + "type": "list", + "angle": 0, + "index": 56, + "blocks": [ + { + "bbox": [ + 114, + 650, + 234, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 650, + 234, + 655 + ], + "spans": [ + { + "bbox": [ + 114, + 650, + 234, + 655 + ], + "type": "text", + "content": "- The initial rectangular sheet is folded several times" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 655, + 221, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 655, + 221, + 662 + ], + "spans": [ + { + "bbox": [ + 114, + 655, + 221, + 662 + ], + "type": "text", + "content": "- Each fold creates multiple overlapping layers" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 662, + 297, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 662, + 297, + 668 + ], + "spans": [ + { + "bbox": [ + 114, + 662, + 297, + 668 + ], + "type": "text", + "content": "- The hole appears to be punched through the right wing of the completed plane" + } + ] + } + ], + "index": 55 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 673, + 496, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 496, + 687 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 496, + 687 + ], + "type": "text", + "content": "Based on the specific folding pattern shown in the diagrams, when the sheet is completely folded into the paper plane configuration, the location where the hole is punched consists of 8 layers of paper stacked together." 
+ } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 113, + 691, + 425, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 691, + 425, + 699 + ], + "spans": [ + { + "bbox": [ + 113, + 691, + 425, + 699 + ], + "type": "text", + "content": "Therefore, when Paulo unfolds the paper, he would find 8 holes in the sheet - one for each layer that was penetrated by the single punch." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 114, + 704, + 139, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 704, + 139, + 711 + ], + "spans": [ + { + "bbox": [ + 114, + 704, + 139, + 711 + ], + "type": "text", + "content": "Answer: C" + } + ] + } + ], + "index": 59 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 61 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "bbox": [ + 268, + 89, + 337, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 89, + 337, + 102 + ], + "spans": [ + { + "bbox": [ + 268, + 89, + 337, + 102 + ], + "type": "text", + "content": "Spatial (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "a04ebb8a43502409b5fd77351daf18ccb85f6f642ae25c7224e556230afbcbc6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 111, + 231, + 147 + ], + "blocks": [ + { + "bbox": [ + 173, + 111, + 231, + 147 + ], + "lines": [ + { + "bbox": [ + 173, + 111, + 231, + 147 + ], + "spans": [ + { + "bbox": [ + 173, + 111, + 231, + 147 + ], + "type": "image", + "image_path": "14c6ef28685649387593825d78b70250342fe608769574e9273cfde733c2252b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 723, + 459, + 736 + ], + "lines": [ + { + "bbox": [ + 149, + 723, + 459, + 736 + ], + "spans": [ + { + "bbox": [ + 149, + 723, + 459, + 736 + ], + "type": "text", + "content": "Figure 42: A sample error case of Spatial Reasoning (difficulty: Hard)." 
+ } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 238, + 114, + 432, + 144 + ], + "blocks": [ + { + "bbox": [ + 238, + 114, + 432, + 144 + ], + "lines": [ + { + "bbox": [ + 238, + 114, + 432, + 144 + ], + "spans": [ + { + "bbox": [ + 238, + 114, + 432, + 144 + ], + "type": "image", + "image_path": "aab8e64fbb9a68e5fd7bb86a766ebaf859645b89ced9f8bad955ead33c881c8a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 113, + 156, + 496, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 156, + 496, + 176 + ], + "spans": [ + { + "bbox": [ + 113, + 156, + 496, + 176 + ], + "type": "text", + "content": "Question: Betty walked around the park once, starting from the marked point in the direction of the arrow. She took 4 pictures. In which order did she take the pictures?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 177, + 265, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 177, + 265, + 186 + ], + "spans": [ + { + "bbox": [ + 113, + 177, + 265, + 186 + ], + "type": "text", + "content": "Options: A: 1234. B: 1324. C: 2143. D: 2341" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 191, + 182, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 191, + 182, + 201 + ], + "spans": [ + { + "bbox": [ + 113, + 191, + 182, + 201 + ], + "type": "text", + "content": "Ground Truth: (C)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 207, + 496, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 207, + 496, + 223 + ], + "spans": [ + { + "bbox": [ + 113, + 207, + 496, + 223 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: This problem requires me to determine the order in which Betty took 4 pictures as she walked around a park. I need to look at the diagram and figure out the sequence." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 228, + 349, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 228, + 349, + 236 + ], + "spans": [ + { + "bbox": [ + 113, + 228, + 349, + 236 + ], + "type": "text", + "content": "First, I'll analyze what I see in the leftmost diagram, which seems to show the park layout:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 236, + 273, + 264 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 114, + 236, + 262, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 236, + 262, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 236, + 262, + 243 + ], + "type": "text", + "content": "- There's a cylindrical object (like a column or tree trunk)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 243, + 273, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 243, + 273, + 251 + ], + "spans": [ + { + "bbox": [ + 114, + 243, + 273, + 251 + ], + "type": "text", + "content": "- There's a cone-shaped object (like a pine tree or monument)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 251, + 260, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 251, + 260, + 258 + ], + "spans": [ + { + "bbox": [ + 114, + 251, + 260, + 258 + ], + "type": "text", + "content": "- There's a dome-shaped object (like a small hill or rock)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 258, + 260, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 258, + 260, + 264 + ], + "spans": [ + { + "bbox": [ + 114, + 258, + 260, + 264 + ], + "type": "text", + "content": "- There's an arrow indicating the direction Betty walked" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 271, + 362, + 307 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 113, + 271, + 343, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 271, + 343, + 279 + ], + "spans": [ + { + "bbox": [ + 113, + 271, + 343, + 279 + ], + "type": "text", + "content": "Then I have 4 numbered pictures, each showing different arrangements of these objects:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 279, + 307, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 279, + 307, + 285 + ], + "spans": [ + { + "bbox": [ + 114, + 279, + 307, + 285 + ], + "type": "text", + "content": "- Picture 1: Shows the dome shape on the left and the cylinder on the right" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 285, + 305, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 285, + 305, + 293 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 305, + 293 + ], + "type": "text", + "content": "- Picture 2: Shows the cone shape on the left and the cylinder on the right" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 293, + 307, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 293, + 307, + 300 + ], + "spans": [ + { + "bbox": [ + 114, + 293, + 307, + 300 + ], + "type": "text", + "content": "- Picture 3: Shows the cylinder on the left and the dome shape on the right." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 300, + 362, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 300, + 362, + 307 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 362, + 307 + ], + "type": "text", + "content": "- Picture 4: Shows the cylinder on the left and both the dome shape and cone shape on the right" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 313, + 442, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 313, + 442, + 321 + ], + "spans": [ + { + "bbox": [ + 113, + 313, + 442, + 321 + ], + "type": "text", + "content": "I need to determine the order in which Betty would see these views as she walks around the park in the direction of the arrow." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 327, + 371, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 327, + 371, + 335 + ], + "spans": [ + { + "bbox": [ + 113, + 327, + 371, + 335 + ], + "type": "text", + "content": "Looking at the main diagram, the arrow indicates she's walking counterclockwise around the park." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 342, + 342, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 342, + 342, + 349 + ], + "spans": [ + { + "bbox": [ + 113, + 342, + 342, + 349 + ], + "type": "text", + "content": "If I start at the marked point and move in the direction of the arrow (counterclockwise):" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 350, + 466, + 376 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 114, + 350, + 365, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 350, + 365, + 356 + ], + "spans": [ + { + "bbox": [ + 114, + 350, + 365, + 356 + ], + "type": "text", + "content": "1. Betty would first see the dome on her left and the cylinder on her right - this matches Picture 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 356, + 452, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 356, + 452, + 362 + ], + "spans": [ + { + "bbox": [ + 114, + 356, + 452, + 362 + ], + "type": "text", + "content": "2. As she continues counterclockwise, she would next see the cone on her left and the cylinder on her right - this matches Picture 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 363, + 401, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 363, + 401, + 369 + ], + "spans": [ + { + "bbox": [ + 114, + 363, + 401, + 369 + ], + "type": "text", + "content": "3. Continuing further, she would see the cylinder on her left and the dome on her right - this matches Picture 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 370, + 466, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 370, + 466, + 376 + ], + "spans": [ + { + "bbox": [ + 114, + 370, + 466, + 376 + ], + "type": "text", + "content": "4. 
Finally, completing the circle, she would see the cylinder on her left with both the dome and cone on her right - this matches Picture 4" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 384, + 335, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 384, + 335, + 392 + ], + "spans": [ + { + "bbox": [ + 113, + 384, + 335, + 392 + ], + "type": "text", + "content": "So the order of the pictures would be 1, 2, 3, 4, which corresponds to option (A) 1234." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 398, + 196, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 398, + 196, + 406 + ], + "spans": [ + { + "bbox": [ + 113, + 398, + 196, + 406 + ], + "type": "text", + "content": "Let me double-check this logic:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 412, + 264, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 412, + 264, + 419 + ], + "spans": [ + { + "bbox": [ + 113, + 412, + 264, + 419 + ], + "type": "text", + "content": "From the starting position and moving counterclockwise:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 419, + 255, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 419, + 255, + 426 + ], + "spans": [ + { + "bbox": [ + 114, + 419, + 255, + 426 + ], + "type": "text", + "content": "- First view: dome on left, cylinder on right (Picture 1)" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 427, + 259, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 427, + 259, + 434 + ], + "spans": [ + { + "bbox": [ + 114, + 427, + 259, + 434 + ], + "type": "text", + "content": "- Second view: cone on left, cylinder on right (Picture 2)" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 114, + 435, + 253, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 435, + 253, + 441 + ], + "spans": [ + { + "bbox": [ + 114, + 435, + 253, + 441 + ], + "type": "text", + "content": "- Third view: cylinder on left, dome on right (Picture 1)" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 441, + 286, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 441, + 286, + 448 + ], + "spans": [ + { + "bbox": [ + 114, + 441, + 286, + 448 + ], + "type": "text", + "content": "- Fourth view: cylinder on left, dome and cone on right (Picture 4)" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 454, + 222, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 454, + 222, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 454, + 222, + 461 + ], + "type": "text", + "content": "This confirms that the answer is (A) 1234." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 462, + 158, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 462, + 158, + 468 + ], + "spans": [ + { + "bbox": [ + 114, + 462, + 158, + 468 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 468, + 244, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 468, + 244, + 475 + ], + "spans": [ + { + "bbox": [ + 114, + 468, + 244, + 475 + ], + "type": "text", + "content": "# Determining Picture Order in Betty's Park Walk" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 482, + 438, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 482, + 438, + 490 + ], + "spans": [ + { + "bbox": [ + 113, + 482, + 438, + 490 + ], + "type": "text", + "content": "I need to figure out the order in which Betty took 4 pictures as she walked once around the park in the direction of the arrow." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 113, + 497, + 194, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 497, + 194, + 503 + ], + "spans": [ + { + "bbox": [ + 113, + 497, + 194, + 503 + ], + "type": "text", + "content": "Analyzing the Park Layout" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 504, + 200, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 504, + 200, + 510 + ], + "spans": [ + { + "bbox": [ + 114, + 504, + 200, + 510 + ], + "type": "text", + "content": "From the first diagram, I can see:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 114, + 511, + 263, + 539 + ], + "type": "list", + "angle": 0, + "index": 46, + "blocks": [ + { + "bbox": [ + 114, + 511, + 253, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 511, + 253, + 518 + ], + "spans": [ + { + "bbox": [ + 114, + 511, + 253, + 518 + ], + "type": "text", + "content": "- A cylindrical object (looks like a column/tree trunk)" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 518, + 263, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 518, + 263, + 525 + ], + "spans": [ + { + "bbox": [ + 114, + 518, + 263, + 525 + ], + "type": "text", + "content": "- A cone-shaped object (like a pointed tree or monument)" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 114, + 525, + 241, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 525, + 241, + 532 + ], + "spans": [ + { + "bbox": [ + 114, + 525, + 241, + 532 + ], + "type": "text", + "content": "- A dome-shaped object (like a small hill or rock)" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 532, + 247, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 532, + 247, + 539 + ], + "spans": [ + { + "bbox": [ + 114, + 532, + 247, + 539 + ], + "type": "text", + "content": "- An arrow indicating counterclockwise movement" + } + ] + } + ], + "index": 45 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 546, + 186, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 546, + 186, + 552 + ], + "spans": [ + { + "bbox": [ + 113, + 546, + 186, + 552 + ], + "type": "text", + "content": "Analyzing Each Picture" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 114, + 553, + 343, + 581 + ], + "type": "list", + "angle": 0, + "index": 52, + "blocks": [ + { + "bbox": [ + 114, + 553, + 303, + 559 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 114, + 553, + 303, + 559 + ], + "spans": [ + { + "bbox": [ + 114, + 553, + 303, + 559 + ], + "type": "text", + "content": "1. **Picture 1**: Shows the dome shape on left, cylindrical object on right" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 114, + 560, + 301, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 560, + 301, + 567 + ], + "spans": [ + { + "bbox": [ + 114, + 560, + 301, + 567 + ], + "type": "text", + "content": "2. **Picture 2**: Shows the cone shape on left, cylindrical object on right" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 114, + 567, + 303, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 567, + 303, + 574 + ], + "spans": [ + { + "bbox": [ + 114, + 567, + 303, + 574 + ], + "type": "text", + "content": "3. **Picture 3**: Shows the cylindrical object on left, dome shape on right" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 114, + 574, + 343, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 574, + 343, + 581 + ], + "spans": [ + { + "bbox": [ + 114, + 574, + 343, + 581 + ], + "type": "text", + "content": "4. **Picture 4**: Shows the cylindrical object on left, both dome and cone shapes on right" + } + ] + } + ], + "index": 51 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 588, + 193, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 588, + 193, + 595 + ], + "spans": [ + { + "bbox": [ + 113, + 588, + 193, + 595 + ], + "type": "text", + "content": "Determining the Sequence" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 114, + 596, + 417, + 630 + ], + "type": "list", + "angle": 0, + "index": 59, + "blocks": [ + { + "bbox": [ + 114, + 596, + 334, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 596, + 334, + 602 + ], + "spans": [ + { + "bbox": [ + 114, + 596, + 334, + 602 + ], + "type": "text", + "content": "Starting from the marked point and moving counterclockwise (following the arrow):" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 114, + 603, + 348, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 603, + 348, + 609 + ], + "spans": [ + { + "bbox": [ + 114, + 603, + 348, + 609 + ], + "type": "text", + "content": "1. First, Betty would see the dome on her left and the cylinder on her right " + }, + { + "bbox": [ + 114, + 603, + 348, + 609 + ], + "type": "inline_equation", + "content": "\\rightarrow^{**}" + }, + { + "bbox": [ + 114, + 603, + 348, + 609 + ], + "type": "text", + "content": " Picture " + }, + { + "bbox": [ + 114, + 603, + 348, + 609 + ], + "type": "inline_equation", + "content": "1^{**}" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 114, + 609, + 417, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 609, + 417, + 616 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 417, + 616 + ], + "type": "text", + "content": "2. 
Continuing counterclockwise, she would next see the cone on her left and the cylinder on her right " + }, + { + "bbox": [ + 114, + 609, + 417, + 616 + ], + "type": "inline_equation", + "content": "\\rightarrow^{**}" + }, + { + "bbox": [ + 114, + 609, + 417, + 616 + ], + "type": "text", + "content": " Picture " + }, + { + "bbox": [ + 114, + 609, + 417, + 616 + ], + "type": "inline_equation", + "content": "2^{**}" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 114, + 616, + 371, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 616, + 371, + 623 + ], + "spans": [ + { + "bbox": [ + 114, + 616, + 371, + 623 + ], + "type": "text", + "content": "3. Moving further, she would see the cylinder on her left and the dome on her right " + }, + { + "bbox": [ + 114, + 616, + 371, + 623 + ], + "type": "inline_equation", + "content": "\\rightarrow^{**}" + }, + { + "bbox": [ + 114, + 616, + 371, + 623 + ], + "type": "text", + "content": " Picture 3\\*\\*" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 114, + 624, + 389, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 624, + 389, + 630 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 389, + 630 + ], + "type": "text", + "content": "4. Finally, she would see the cylinder on her left with both the dome and cone on her right " + }, + { + "bbox": [ + 114, + 624, + 389, + 630 + ], + "type": "inline_equation", + "content": "\\rightarrow^{**}" + }, + { + "bbox": [ + 114, + 624, + 389, + 630 + ], + "type": "text", + "content": " Picture " + }, + { + "bbox": [ + 114, + 624, + 389, + 630 + ], + "type": "inline_equation", + "content": "4^{**}" + } + ] + } + ], + "index": 58 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 637, + 204, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 637, + 204, + 645 + ], + "spans": [ + { + "bbox": [ + 113, + 637, + 204, + 645 + ], + "type": "text", + "content": "This gives us the sequence 1-2-3-4." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 114, + 651, + 143, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 651, + 143, + 658 + ], + "spans": [ + { + "bbox": [ + 114, + 651, + 143, + 658 + ], + "type": "text", + "content": "Answer: A" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 113, + 662, + 269, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 662, + 269, + 672 + ], + "spans": [ + { + "bbox": [ + 113, + 662, + 269, + 672 + ], + "type": "text", + "content": "Error Category: Spatial / Orientation Error" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 113, + 673, + 496, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 673, + 496, + 712 + ], + "spans": [ + { + "bbox": [ + 113, + 673, + 496, + 712 + ], + "type": "text", + "content": "Error Reason: The model makes a spatial orientation error by incorrectly interpreting the shifting perspectives as Betty walks around the park, especially at the starting point. It assumes a consistent viewpoint but fails to mentally rotate the scene to match what Betty would actually see from each new position." 
+ } + ] + } + ], + "index": 63 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 65 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "bbox": [ + 268, + 89, + 337, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 89, + 337, + 102 + ], + "spans": [ + { + "bbox": [ + 268, + 89, + 337, + 102 + ], + "type": "text", + "content": "Spatial (Hard)" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 363, + 89, + 375, + 100 + ], + "blocks": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "lines": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "spans": [ + { + "bbox": [ + 363, + 89, + 375, + 100 + ], + "type": "image", + "image_path": "2586382fa16ad27a1d1de419769a9996fc1643298d2473d0ee193d5899f640c7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 115, + 115, + 154, + 135 + ], + "blocks": [ + { + "bbox": [ + 115, + 115, + 154, + 135 + ], + "lines": [ + { + "bbox": [ + 115, + 115, + 154, + 135 + ], + "spans": [ + { + "bbox": [ + 115, + 115, + 154, + 135 + ], + "type": "image", + "image_path": "6e1c6e63d19fe7d1bf2557d0f987c2866548f050e00b0b561dc19978100bddba.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 723, + 463, + 736 + ], + "lines": [ + { + "bbox": [ + 146, + 723, + 463, + 736 + ], + "spans": [ + { + "bbox": [ + 146, + 723, + 463, + 736 + ], + "type": "text", + "content": "Figure 43: A sample correct case of Spatial Reasoning (difficulty: Hard)." 
+ } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 142, + 137, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 142, + 137, + 147 + ], + "spans": [ + { + "bbox": [ + 132, + 142, + 137, + 147 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 167, + 111, + 200, + 137 + ], + "blocks": [ + { + "bbox": [ + 167, + 111, + 200, + 137 + ], + "lines": [ + { + "bbox": [ + 167, + 111, + 200, + 137 + ], + "spans": [ + { + "bbox": [ + 167, + 111, + 200, + 137 + ], + "type": "image", + "image_path": "b7644b24bb6cc17803106504397075a33326ffdc8d6e7a7d2ceb81edbe695538.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 181, + 141, + 186, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 141, + 186, + 147 + ], + "spans": [ + { + "bbox": [ + 181, + 141, + 186, + 147 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 209, + 111, + 242, + 137 + ], + "blocks": [ + { + "bbox": [ + 209, + 111, + 242, + 137 + ], + "lines": [ + { + "bbox": [ + 209, + 111, + 242, + 137 + ], + "spans": [ + { + "bbox": [ + 209, + 111, + 242, + 137 + ], + "type": "image", + "image_path": "988fe318a4b806c51614855dc118b5905c5628e9d1a730257062c2d4125fef59.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 223, + 141, + 228, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 141, + 228, + 147 + ], + "spans": [ + { + "bbox": [ + 223, + 141, + 228, + 147 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 254, + 111, + 287, + 137 + ], + "blocks": [ + { + "bbox": [ + 254, + 111, + 287, + 137 + ], + "lines": [ + { + "bbox": [ + 254, + 111, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 254, + 111, + 287, + 137 + ], + "type": "image", + "image_path": "b762364e4fbdadb57bcfa6d9c604c9302b6522c57bd98dcd1c60d88d2028f45b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 269, + 141, + 274, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 141, + 274, + 147 + ], + "spans": [ + { + "bbox": [ + 269, + 141, + 274, + 147 + ], + "type": "text", + "content": "D" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 293, + 112, + 496, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 112, + 496, + 133 + ], + "spans": [ + { + "bbox": [ + 293, + 112, + 496, + 133 + ], + "type": "text", + "content": "Question: From the flat expansion view of paper boxes, which paper box is different from the other three?" 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 293, + 139, + 362, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 139, + 362, + 150 + ], + "spans": [ + { + "bbox": [ + 293, + 139, + 362, + 150 + ], + "type": "text", + "content": "Ground Truth: (A)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 112, + 160, + 496, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 160, + 496, + 175 + ], + "spans": [ + { + "bbox": [ + 112, + 160, + 496, + 175 + ], + "type": "text", + "content": "Claude-3.7-Sonnet-Thinking: This problem is asking me to identify which one of the four flat expansions of paper boxes is different from the others. Let me analyze each one carefully." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 112, + 181, + 495, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 181, + 495, + 196 + ], + "spans": [ + { + "bbox": [ + 112, + 181, + 495, + 196 + ], + "type": "text", + "content": "These patterns represent unfolded 3D boxes (nets), where the squares would fold to create a 3D shape. The black squares and circles represent some kind of marking or pattern on the surface of the box." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 113, + 202, + 239, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 202, + 239, + 210 + ], + "spans": [ + { + "bbox": [ + 113, + 202, + 239, + 210 + ], + "type": "text", + "content": "Let me analyze the structure of each expansion:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 113, + 210, + 453, + 239 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 113, + 210, + 453, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 210, + 453, + 217 + ], + "spans": [ + { + "bbox": [ + 113, + 210, + 453, + 217 + ], + "type": "text", + "content": "A: This appears to be a linear arrangement of 5 squares with a T-shape extension. The pattern has black regions in various squares." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 113, + 217, + 422, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 217, + 422, + 224 + ], + "spans": [ + { + "bbox": [ + 113, + 217, + 422, + 224 + ], + "type": "text", + "content": "B: This is a more complex shape with 7 squares arranged in a cross-like pattern. It has black regions in various squares." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 224, + 376, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 224, + 376, + 232 + ], + "spans": [ + { + "bbox": [ + 113, + 224, + 376, + 232 + ], + "type": "text", + "content": "C: This also appears to be an arrangement of squares that could fold into a box, with black markings." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 232, + 315, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 232, + 315, + 239 + ], + "spans": [ + { + "bbox": [ + 113, + 232, + 315, + 239 + ], + "type": "text", + "content": "D: This is another cross-shaped arrangement of squares with black markings." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 244, + 466, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 244, + 466, + 253 + ], + "spans": [ + { + "bbox": [ + 113, + 244, + 466, + 253 + ], + "type": "text", + "content": "To determine which one is different, I need to see if they would all fold into valid 3D boxes and if there's any pattern that sets one apart." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 259, + 304, + 281 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 113, + 259, + 225, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 259, + 225, + 266 + ], + "spans": [ + { + "bbox": [ + 113, + 259, + 225, + 266 + ], + "type": "text", + "content": "Let me think about how these would fold:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 266, + 282, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 266, + 282, + 274 + ], + "spans": [ + { + "bbox": [ + 113, + 266, + 282, + 274 + ], + "type": "text", + "content": "- A cube has 6 faces, so a valid net should have exactly 6 squares" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 113, + 274, + 304, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 274, + 304, + 281 + ], + "spans": [ + { + "bbox": [ + 113, + 274, + 304, + 281 + ], + "type": "text", + "content": "- The squares must be connected in a way that allows folding into a cube" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 287, + 201, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 287, + 201, + 294 + ], + "spans": [ + { + "bbox": [ + 113, + 287, + 201, + 294 + ], + "type": "text", + "content": "Let me count the squares in each:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 113, + 294, + 166, + 323 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 113, + 294, + 166, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 294, + 166, + 301 + ], + "spans": [ + { + "bbox": [ + 113, + 294, + 166, + 301 + ], + "type": "text", + "content": "A: I count 5 squares" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 113, + 301, + 166, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 301, + 166, + 308 + ], + "spans": [ + { + "bbox": [ + 113, + 301, + 166, + 308 + ], + "type": "text", + "content": "B: I count 7 squares" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 308, + 166, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 308, + 166, + 316 + ], + "spans": [ + { + "bbox": [ + 113, + 308, + 166, + 316 + ], + "type": "text", + "content": "C: I count 7 squares" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 113, + 316, + 166, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 316, + 166, + 323 + ], + "spans": [ + { + "bbox": [ + 113, + 316, + 166, + 323 + ], + "type": "text", + "content": "D:1 count 7 squares" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 329, + 495, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 329, + 495, + 345 + ], + "spans": [ + { + "bbox": [ + 112, + 329, + 495, + 345 + ], + "type": "text", + "content": "Looking at the number of squares, A has 5 squares while B, C, and D each have 7 squares. 
A cube needs 6 faces, so none of these would fold into a perfect cube. A is different in that it has fewer squares than the others." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 113, + 350, + 344, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 350, + 344, + 358 + ], + "spans": [ + { + "bbox": [ + 113, + 350, + 344, + 358 + ], + "type": "text", + "content": "However, let me look more carefully at these patterns to see if there's another difference." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 365, + 179, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 365, + 179, + 372 + ], + "spans": [ + { + "bbox": [ + 113, + 365, + 179, + 372 + ], + "type": "text", + "content": "Actually, let me recount:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 113, + 372, + 332, + 401 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 113, + 372, + 332, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 372, + 332, + 380 + ], + "spans": [ + { + "bbox": [ + 113, + 372, + 332, + 380 + ], + "type": "text", + "content": "A: Looking more carefully, I see 6 squares arranged in a T-pattern with an extension" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 380, + 272, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 380, + 272, + 387 + ], + "spans": [ + { + "bbox": [ + 113, + 380, + 272, + 387 + ], + "type": "text", + "content": "B: I see 7 squares arranged in a cross-pattern with extensions" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 113, + 387, + 216, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 387, + 216, + 394 + ], + "spans": [ + { + "bbox": [ + 113, + 387, + 216, + 394 + ], + "type": "text", + "content": "C: I see 7 squares arranged in a pattern" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 113, + 394, + 231, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 394, + 231, + 401 + ], + "spans": [ + { + "bbox": [ + 113, + 394, + 231, + 401 + ], + "type": "text", + "content": "D: I see 7 squares arranged in a cross-pattern" + } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 407, + 495, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 407, + 495, + 422 + ], + "spans": [ + { + "bbox": [ + 112, + 407, + 495, + 422 + ], + "type": "text", + "content": "So option A has 6 squares, while B, C, and D each have 7 squares. Since a cube has 6 faces, A is the only one that could potentially fold into a proper cube. The others have an extra square." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 113, + 428, + 204, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 428, + 204, + 435 + ], + "spans": [ + { + "bbox": [ + 113, + 428, + 204, + 435 + ], + "type": "text", + "content": "Let me check once more carefully:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 435, + 350, + 464 + ], + "type": "list", + "angle": 0, + "index": 46, + "blocks": [ + { + "bbox": [ + 113, + 435, + 350, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 435, + 350, + 442 + ], + "spans": [ + { + "bbox": [ + 113, + 435, + 350, + 442 + ], + "type": "text", + "content": "A: I count squares from left to right and top to bottom: 1, 2, 3, 4, 5, 6. So there are 6 squares." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 113, + 443, + 166, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 443, + 166, + 449 + ], + "spans": [ + { + "bbox": [ + 113, + 443, + 166, + 449 + ], + "type": "text", + "content": "B: I count 7 squares" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 449, + 167, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 449, + 167, + 457 + ], + "spans": [ + { + "bbox": [ + 113, + 449, + 167, + 457 + ], + "type": "text", + "content": "C: I count 7 squares." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 113, + 457, + 167, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 457, + 167, + 464 + ], + "spans": [ + { + "bbox": [ + 113, + 457, + 167, + 464 + ], + "type": "text", + "content": "D: I count 7 squares." + } + ] + } + ], + "index": 45 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 112, + 470, + 430, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 470, + 430, + 479 + ], + "spans": [ + { + "bbox": [ + 112, + 470, + 430, + 479 + ], + "type": "text", + "content": "So based on the number of squares, A is different from the others because it has 6 squares while the others have 7 squares." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 112, + 483, + 495, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 483, + 495, + 499 + ], + "spans": [ + { + "bbox": [ + 112, + 483, + 495, + 499 + ], + "type": "text", + "content": "But let me think about whether there could be another difference that's more subtle, like the pattern of black regions or the connectivity of the squares." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 113, + 505, + 436, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 505, + 436, + 521 + ], + "spans": [ + { + "bbox": [ + 113, + 505, + 436, + 521 + ], + "type": "text", + "content": "Looking at the patterns of black and white regions, they all seem to have black regions and circles in different arrangements. Looking at the connectivity of the squares, they all have different patterns of how the squares connect." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 113, + 526, + 495, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 526, + 495, + 542 + ], + "spans": [ + { + "bbox": [ + 113, + 526, + 495, + 542 + ], + "type": "text", + "content": "I think the key difference is the number of squares. A has 6 squares which is the exact number needed for a cube. B, C, and D all have 7 squares, which is one too many for a standard cube - these would need to have one square overlapping or not used when folded." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 113, + 548, + 165, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 548, + 165, + 555 + ], + "spans": [ + { + "bbox": [ + 113, + 548, + 165, + 555 + ], + "type": "text", + "content": "So my answer is A." 
+ } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 113, + 555, + 156, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 555, + 156, + 562 + ], + "spans": [ + { + "bbox": [ + 113, + 555, + 156, + 562 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 113, + 562, + 220, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 562, + 220, + 570 + ], + "spans": [ + { + "bbox": [ + 113, + 562, + 220, + 570 + ], + "type": "text", + "content": "Analysis of Paper Box Flat Expansions" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 113, + 575, + 448, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 575, + 448, + 584 + ], + "spans": [ + { + "bbox": [ + 113, + 575, + 448, + 584 + ], + "type": "text", + "content": "I need to determine which of these flat expansions (nets) is different from the others by examining their structure and properties." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 113, + 590, + 283, + 626 + ], + "type": "list", + "angle": 0, + "index": 60, + "blocks": [ + { + "bbox": [ + 113, + 590, + 261, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 590, + 261, + 597 + ], + "spans": [ + { + "bbox": [ + 113, + 590, + 261, + 597 + ], + "type": "text", + "content": "First, let me count the number of squares in each option:" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 113, + 597, + 279, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 597, + 279, + 605 + ], + "spans": [ + { + "bbox": [ + 113, + 597, + 279, + 605 + ], + "type": "text", + "content": "- Option A: 6 squares arranged in an L-shape with an extension" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 113, + 605, + 282, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 605, + 282, + 613 + ], + "spans": [ + { + "bbox": [ + 113, + 605, + 282, + 613 + ], + "type": "text", + "content": "- Option B: 7 squares arranged in a cross pattern with extensions" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 113, + 613, + 270, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 613, + 270, + 619 + ], + "spans": [ + { + "bbox": [ + 113, + 613, + 270, + 619 + ], + "type": "text", + "content": "- Option C: 7 squares arranged in a T-shape with extensions" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 113, + 619, + 283, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 619, + 283, + 626 + ], + "spans": [ + { + "bbox": [ + 113, + 619, + 283, + 626 + ], + "type": "text", + "content": "- Option D: 7 squares arranged in a cross pattern with extensions" + } + ] + } + ], + "index": 59 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 632, + 441, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 632, + 441, + 640 + ], + "spans": [ + { + "bbox": [ + 113, + 632, + 441, + 640 + ], + "type": "text", + "content": "The key difference I notice is that option A has exactly 6 squares, while all the other options (B, C, and D) have 7 squares each." 
+ } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 112, + 646, + 495, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 646, + 495, + 661 + ], + "spans": [ + { + "bbox": [ + 112, + 646, + 495, + 661 + ], + "type": "text", + "content": "This is significant because a cube has exactly 6 faces, so a proper net for a cube should have exactly 6 squares. Options B, C, and D all have one extra square, which means they would either:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 113, + 661, + 229, + 682 + ], + "type": "list", + "angle": 0, + "index": 66, + "blocks": [ + { + "bbox": [ + 113, + 661, + 193, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 661, + 193, + 668 + ], + "spans": [ + { + "bbox": [ + 113, + 661, + 193, + 668 + ], + "type": "text", + "content": "1. Not fold into a proper cube" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 113, + 668, + 229, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 668, + 229, + 675 + ], + "spans": [ + { + "bbox": [ + 113, + 668, + 229, + 675 + ], + "type": "text", + "content": "2. Have an overlapping square when folded" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 113, + 675, + 212, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 675, + 212, + 682 + ], + "spans": [ + { + "bbox": [ + 113, + 675, + 212, + 682 + ], + "type": "text", + "content": "3. Require leaving one square unused" + } + ] + } + ], + "index": 65 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 113, + 688, + 477, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 688, + 477, + 696 + ], + "spans": [ + { + "bbox": [ + 113, + 688, + 477, + 696 + ], + "type": "text", + "content": "Therefore, option A is different from the other three because it's the only one with the correct number of squares (6) to form a standard cube." 
+ } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 113, + 703, + 143, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 703, + 143, + 710 + ], + "spans": [ + { + "bbox": [ + 113, + 703, + 143, + 710 + ], + "type": "text", + "content": "Answer: A" + } + ] + } + ], + "index": 68 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Submission In Progress" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 70 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_content_list.json b/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7282ae50d8759ef7c2d2e7e41263c8662748017c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_content_list.json @@ -0,0 +1,3201 @@ +[ + { + "type": "text", + "text": "ALMTokenizer: A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "text_level": 1, + "bbox": [ + 94, + 109, + 875, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dongchao Yang $^{1}$ Songxiang Liu $^{2}$ Haohan Guo $^{1}$ Jiankun Zhao $^{1}$ Yuanyuan Wang $^{1}$ Helin Wang $^{2}$ Zeqian Ju $^{2}$ Xubo Liu $^{2}$ Xueyuan Chen $^{1}$ Xu Tan $^{2}$ Xixin Wu $^{1}$ Helen Meng $^{1}$", + "bbox": [ + 86, + 198, + 883, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 241, + 258, + 320, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in audio language models have underscored the pivotal role of audio tokenization, which converts audio signals into discrete tokens, thereby facilitating the application of language model architectures to the audio domain. In this study, we introduce ALMTokenizer, a novel low-bitrate and semantically rich audio codec tokenizer for audio language models. Prior methods, such as Encodec, typically encode individual audio frames into discrete tokens without considering the use of context information across frames. Unlike these methods, we introduce a novel query-based compression strategy to capture holistic information with a set of learnable query tokens by explicitly modeling the context information across frames. This design not only enables the codec model to capture more semantic information but also encodes the audio signal with fewer token sequences. Additionally, to enhance the semantic information in audio codec models, we introduce the following: (1) A masked autoencoder (MAE) loss, (2) Vector quantization based on semantic priors, and (3) An autoregressive (AR) prediction loss. As a result, ALMTokenizer achieves competitive reconstruction performance relative to state-of-the-art approaches while operating at a lower bitrate. 
Within the same audio language model framework, ALMTokenizer outperforms previous tokenizers in audio understanding and generation tasks. $^{1}$", + "bbox": [ + 117, + 279, + 444, + 732 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 86, + 761, + 217, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The field of generative modeling has witnessed remarkable progress, largely driven by the success of autoregressive", + "bbox": [ + 84, + 786, + 473, + 818 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Equal contribution 1The Chinese University of Hong Kong, Hong Kong, China 2Independent Authors. Correspondence to: Dongchao Yang .", + "bbox": [ + 84, + 825, + 473, + 866 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(AR) models in the development of large language models (LLMs) (OpenAI, 2023). Inspired by the success of LLMs in the fields of natural language processing (NLP), recent works have begun to employ AR transformers for audio generation (Borsos et al., 2023a; Agostinelli et al., 2023; Yang et al., 2023c), such as using the AR transformer paradigm to solve text-to-speech task (Wang et al., 2023), or expanding the text LLM into multimodal LLM by integrating the audio modality into the original LLM (Défossez et al., 2024). Audio tokenizer plays an important role in all of these models, which converts audio signals into discrete token sequence for AR audio language modeling.", + "bbox": [ + 496, + 258, + 887, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the literature, audio codec models, such as SoundStream (Zeghidour et al., 2021) and Encodec (Défossez et al., 2022), have been widely adopted as audio tokenizers for audio language models. These generative models aim to represent audio data in a quantized discrete latent space, where the codec's decoder is then used to reconstruct the audio signals from the generated discrete token sequences. Recently, there has been significant interest in the audio community regarding audio codec tokenizers, leading to the proposal of several novel models (Kumar et al., 2023; Ji et al., 2024; Défossez et al., 2024; Parker et al., 2024; Zhang et al., 2023). Despite the advancements in audio codec models, an important research question remains unanswered: which type of audio codec is most suitable for audio language modeling? Inspired by previous works (Borsos et al., 2023a; Parker et al., 2024; Ji et al., 2024; Défossez et al., 2024), these studies investigate two key properties of audio codec models: low bitrate and semantic richness. We first conduct a set of evaluation experiments to explore the influence of bitrate and semantic information on audio language modeling. Specifically, we train three audio codec models with varying bitrates, while keeping the number of vector quantization (VQ) layers constant and adjusting the frame rates to $50\\mathrm{Hz}$ , $25\\mathrm{Hz}$ , and $12.5\\mathrm{Hz}$ . We then train the audio language model using different audio tokenizers on the same dataset. To assess the impact of semantic information, we also train a $12.5\\mathrm{Hz}$ semantic tokenizer and incorporate it into the audio language model. Further details can be found in Appendix B. 
Figure 1 presents the results, which show that: (1) low-bitrate audio codec models significantly en", + "bbox": [ + 495, + 446, + 888, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10344v1 [cs.SD] 14 Apr 2025", + "bbox": [ + 22, + 265, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Pre-print.", + "bbox": [ + 84, + 878, + 147, + 891 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1http://dongchaoyang.top/ALMTokensizer/", + "bbox": [ + 107, + 891, + 359, + 905 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hance training and inference efficiency; and (2) semantic information is more easily modeled by LM-based generative methods, e.g. lower PPL and loss. The experimental findings demonstrate the importance of constructing a low-bitrate and semantic-rich audio codec tokenizer for audio language modeling. Based on these results, we propose a novel audio codec tokenizer that offers the following advantages: (1) Low-bitrate: it compresses the audio data into fewer tokens; (2) Semantic-rich: it incorporates abundant semantic information; (3) AR-driven latent space: it optimizes the latent space for autoregressive (AR) modeling.", + "bbox": [ + 84, + 84, + 475, + 251 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To achieve this objective, we propose the following novel techniques: (1) We introduce a novel query-based compression strategy, which uses a set of learnable query tokens to capture holistic information by explicitly modeling the context information across audio frames with transformer layers. This strategy effectively takes advantage of the strong modeling capabilities of transformers to achieve better compression and semantic modeling. It also enables dynamic control over the compression rate by adjusting the number of query tokens. (2) To enhance semantic richness in the codec model, we introduce a Masked Autoencoder (MAE) loss, which encourages the model to capture more global information. (3) Inspired by previous works (Zhu et al., 2024), we propose the integration of semantic priors into the VQ layer. Specifically, we perform k-means clustering on the pre-trained wav2vec2 (Baevski et al., 2020) and BEATs (Chen et al., 2022b) encoder outputs, using the cluster centers to initialize the VQ layer. (4) We observe that AR models struggle to fit the distribution of the residuals in the VQ layers, with token prediction accuracy being notably lower in the second and third VQ layers compared to the first. To address this issue, we introduce an AR prediction loss to optimize the latent space.", + "bbox": [ + 86, + 258, + 477, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of the ALMTokenizer, we first compare its reconstruction and semantic performance with previous state-of-the-art models. Using the same audio language model framework, we then demonstrate that ALMTokenizer achieves superior performance in LM-based audio understanding and generation tasks, including text-to-speech (TTS), speech-to-text (ASR), audio captioning, text-to-sound, text-to-music, and music captioning.", + "bbox": [ + 84, + 604, + 475, + 727 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 84, + 744, + 235, + 762 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 
Audio Language Models", + "text_level": 1, + "bbox": [ + 84, + 771, + 290, + 786 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, there has been a growing interest in bridging audio and text through multimodal learning approaches. Models such as AudioLM (Borsos et al., 2023a) leverage AR transformers and hierarchical modeling techniques to process audio data directly, learning representations that capture both linguistic and acoustic features. Inspired by AudioLM, VALL-E (Wang et al., 2023) and SPEAR-TTS (Kharitonov", + "bbox": [ + 84, + 795, + 475, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1f2afdd26c38ad12bec4637ffdc1de7b03af211f664c49636beea44b22135499.jpg", + "image_caption": [ + "Figure 1. The performance comparison when different types of tokenizer is used for audio modeling. PPL refers to perplexity." + ], + "image_footnote": [], + "bbox": [ + 514, + 80, + 870, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "et al., 2023) formulate the text-to-speech task as an audio language modeling problem: generating an audio token sequence with the help of an autoregressive transformer. MusicLM (Agostinelli et al., 2023) and MusicGen (Copet et al., 2023) frame the text-to-music task as an audio language modeling problem. UniSep (Wang et al., 2025) explores using audio LM to solve audio separation tasks with the help of audio tokenizer. Moshi (Défossez et al., 2024), SpiRitLM (Nguyen et al., 2025), and GLM4-Voice (Zeng et al., 2024) explore speech-to-speech conversation. Furthermore, audio tokenizers can also be combined with discrete diffusion models (Yang et al., 2023d;a; Borsos et al., 2023b; Ju et al., 2024). In all of these models, the audio tokenizer plays a crucial role by transforming audio data into a discrete latent sequence, reducing computational demands compared to directly processing the audio signal, and enhancing the effectiveness and efficiency of the generation process.", + "bbox": [ + 495, + 397, + 888, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Audio Tokenizer", + "text_level": 1, + "bbox": [ + 496, + 670, + 645, + 684 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the literature, both semantic and acoustic tokenizers are widely employed in audio language models. The semantic tokenizer is trained using pre-trained self-supervised learning (SSL) models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a). Applying k-means or vector quantization in these models generates semantic tokens (Zeng et al., 2024; Du et al., 2024; Liu et al., 2024). Previous works (Borsos et al., 2023a) demonstrate that semantic tokens are more easily modeled by language models. However, due to the loss of significant acoustic information in semantic tokens, they rely on an additional decoder to generate high-fidelity waveform, such as a diffusion model (Ho et al., 2020) or flow-matching (Lipman et al., 2022). Inevitably, this additional module results in increased infer", + "bbox": [ + 495, + 694, + 888, + 904 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3c618b74f688548add74bec89c6981564b20b5fb43a06e3d3e40ad5bc0570a5f.jpg", + "image_caption": [ + "Figure 2. 
The left part illustrates the framework of the previous audio codec, while the right part provides an overview of the proposed ALMTokensizer. $w$ denotes the window size. The details of ALMTokensizer can be found in Section 3.2." + ], + "image_footnote": [], + "bbox": [ + 135, + 94, + 318, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/929236d9a847bdafe944104601209dbf98eadb1d765b6edf85d4aa3a2145a540.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 328, + 94, + 836, + 286 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ence complexity and poorer reconstruction.", + "bbox": [ + 84, + 361, + 374, + 375 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Acoustic tokenizer refers to audio codec models, trained for acoustic-level reconstruction tasks. Audio codec (Zeghidour et al., 2021; Defossez et al., 2022; Yang et al., 2023b; Kumar et al., 2023) have demonstrated exceptional performance in reconstructing high-quality audio. In general, these codec models consist of an encoder, a quantizer, and a decoder. Both the encoder and decoder are lightweight, resulting in minimal inference costs. Compared to semantic tokens, codec models can support audio, speech, and music domains, and their rich acoustic details mitigate the need for cascading architectures in downstream generative models. Recently, an increasing number of audio codec models have been proposed, focusing on (1) Better reconstruction quality, such as DAC (Kumar et al., 2023), Vocos (Siuzdak, 2023), SQ-Codec (Yang et al., 2024c;b) and APCodec (Ai et al., 2024); (2) Low-bitrate models, such as HiFiCodec (Yang et al., 2023b), wavtokenizer (Ji et al., 2024), StableCodec (Parker et al., 2024), and TS3-Coded (Wu et al., 2024); (3) Task-driven codec, designed for text-to-speech tasks, such as FACodec (Ju et al., 2024), SpeechTokenizer (Zhang et al., 2023), Single-Coded (Li et al., 2024), audio retrieval-based Tokenizers (Banerjee & Arora, 2022; van Niekerk et al., 2024). In this study, we focus on developing a low-bitrate, semantically rich audio codec tokenizer. The most closely related work to ours is MimiCodec (Defossez et al., 2024), which provides high-quality semantic information while achieving a low bitrate (1.1 kbps). However, MimiCodec relies on knowledge distillation from WavLM (Chen et al., 2022a) to the first VQ layer, whereas the remaining VQ layers do not incorporate semantic information. Furthermore, it is specifically designed for speech tasks and has not been validated for non-speech tasks, such as sound and music generation. In contrast to MimiCodec, our ALMTokens encode more semantic information across all VQ layers, achieves a lower bitrate, and is designed for both speech and", + "bbox": [ + 84, + 376, + 475, + 905 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "general sound.", + "bbox": [ + 496, + 361, + 598, + 376 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Proposed Method", + "text_level": 1, + "bbox": [ + 496, + 395, + 671, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section introduces the technical details of the proposed ALMTokensizer. Section 3.1 presents the framework of previous audio codec models. Section 3.2 presents the details of proposed audio codec framework. In Sections 3.3 and 3.4, we present the training loss and training strategies.", + "bbox": [ + 496, + 421, + 888, + 497 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Preliminary", + "text_level": 1, + "bbox": [ + 496, + 513, + 616, + 529 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Previous audio codec (Défossez et al., 2022; Zeghidour et al., 2021) typically adopt an encoder-quantizer-decoder framework, as shown in the left part of Figure 2. The audio is encoded into several audio frames by the encoder. Then, residual vector quantization (RVQ) (Zeghidour et al., 2021) is used to quantize these audio frames. Lastly, the decoder is used to recover the waveform from the quantized audio frames. It can be observed that previous works treat each audio frame equally and rely on these quantized frames to recover the audio. However, such a strategy (1) ignores the fact that different audio frames encode different levels of information, which results in some audio frames being difficult to recover in low-bitrate settings (e.g., encoding the audio frames at $12.5\\mathrm{Hz}$ ); (2) fails to utilize the context information between different frames.", + "bbox": [ + 495, + 537, + 887, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Query-based Audio Compression", + "text_level": 1, + "bbox": [ + 496, + 780, + 764, + 796 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To construct a low-bitrate, semantically rich audio codec model, we propose a query-based compression strategy. Our approach is inspired by the success of MAE (He et al., 2022), which applies a masking operation to the original image with a high mask rate (75%). With the help of a transformer encoder and decoder, it is possible to recover the masked", + "bbox": [ + 495, + 803, + 888, + 893 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 764, + 70 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "image content by utilizing the context information between different patches. Thus, we propose using a group of query tokens ${}^{2}$ to capture holistic audio context information from the audio frames with the assistance of a transformer encoder. Since these query tokens include rich context information, it is possible to reconstruct the audio based on them. Then, a transformer decoder and mask tokens are employed to reconstruct the audio from the quantized query tokens. This strategy leverages the powerful modeling capabilities of transformers to achieve better compression and semantic modeling. Similar query-based strategies has been widely explored in previous works, such as BLIP2 (Li et al., 2023), SALMONN (Tang et al., 2024) and TiTok(Yu et al., 2024). The right part of Figure 2 illustrates the overall framework of ALMTokensizer. In the following sections, we detail each component and the associated training loss.", + "bbox": [ + 84, + 85, + 475, + 327 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Patchify and UnPatchify We explore two types of Patchify modules: (1) Following Encodec (Défossez et al., 2022), a convolution-based module, which encodes the audio data $\\mathbf{x}$ into $e \\in \\mathcal{R}^{T \\times d}$ , where $T$ and $d$ denote the number of frames and the vector dimension, and (2) Following StableCodec (Parker et al., 2024), which directly uses a linear layer to encode the audio data into $e \\in \\mathcal{R}^{T \\times d}$ and adds several transformer layers. 
Similarly, the UnPatchify mirrors the architecture of Patchify. If we use the Encodec-style Patchify module, the UnPatchify module substitutes stride convolutions with transposed convolutions and reverses the stride order. If we use the StableCodec-style Patchify module, the UnPatchify module includes a transformer block and a reshape operation. In our preliminary experiments, we find that the Encodec-style Patchify and UnPatchify modules bring better reconstruction performance. We adopt the Encodec-style Patchify module as our default setting.", + "bbox": [ + 84, + 333, + 477, + 592 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Token Interleaving The token interleaving module aims to combine two token sequences into a single sequence. In the encoder part, we combine the audio frames $e \\in \\mathcal{R}^{T \\times d}$ and the query token [CLS]. Assuming a window size of $w$ , the query token will be inserted into the audio frame sequence at every $w$ -intervals. In the decoder part, the token interleaving module is used to combine the quantized query tokens and learnable mask tokens. We insert $w$ mask tokens before each query token. During the training stage, we dynamically choose the window size for each training iteration.", + "bbox": [ + 84, + 598, + 475, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Token Retrieval The token retrieval module aims to retrieve the relevant tokens from a sequence. In the encoder part, we use it to retrieve the learnable query tokens. In the decoder part, we use it to retrieve the learnable mask tokens.", + "bbox": [ + 84, + 771, + 473, + 832 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Query-based Transformer Encoder As the previous part", + "bbox": [ + 84, + 839, + 473, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "discussed, we introduce a learnable query token $[\\mathrm{cls}] \\in \\mathcal{R}^{1 \\times d}$ to capture holistic information from the audio frames $e$ . As Figure 2 shows, we first combine the audio frames and query token using a token interleaving module with a window size $w$ . Then, a transformer module is applied to model the whole sequence $e_a$ . After that, we employ a token retrieval module to extract the query tokens $h \\in \\mathcal{R}^{[T / w] \\times d}$ .", + "bbox": [ + 496, + 84, + 885, + 190 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {e} = P (\\boldsymbol {x}), \\boldsymbol {e} _ {\\boldsymbol {a}} = I n t e r l e a v i n g (\\boldsymbol {\\mathbf {e}}, \\boldsymbol {c l s}, w), \\tag {1} \\\\ \\boldsymbol {e} _ {\\boldsymbol {a}} = E n (\\boldsymbol {e} _ {\\boldsymbol {a}}), \\boldsymbol {h} = R e c t r i e v a l (\\boldsymbol {e} _ {\\boldsymbol {a}}, w) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 545, + 196, + 885, + 233 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $P(\\cdot)$ denotes the Patchify module. $En(\\cdot)$ denotes the transformer encoder.", + "bbox": [ + 496, + 239, + 885, + 270 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Residual Vector Quantization To build a low-bitrate audio codec, we empirically set the number of RVQ layers to 3, since we found that 3 RVQ layers suffice to build an effective audio codec model: $\\hat{h} = Q(h)$ . 
Inspired by previous works (Zhu et al., 2024; Yang et al., 2024a), we first obtain the k-means clusters of Wav2vec2 (Baevski et al., 2020) to represent the speech semantic prior, and the k-means clusters of the BEATs (Chen et al., 2022b) to represent the general sound semantic prior. Assuming the codebook size is $C$ , we set $C / 2$ to represent speech, with the remaining portion representing general sound. We then use these semantic priors to initialize the codebook of the VQ layer and fix it. Next, we apply a linear layer to map the input features into the VQ layer.", + "bbox": [ + 496, + 277, + 885, + 489 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Query-based Transformer Decoder To recover the audio information, we construct a reverse process using the encoder part. We first use the token interleaving module to combine the mask token $m \\in \\mathcal{R}^{1 \\times d}$ with $\\hat{\\pmb{h}}$ . The new sequence is then modeled by a transformer module. We expect that these mask tokens can be used to recover the audio information with the help of the Unpatchify module.", + "bbox": [ + 495, + 497, + 885, + 603 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\boldsymbol {q} _ {\\boldsymbol {a}} = \\text {I n t e r l e a v i n g} (\\hat {\\boldsymbol {h}}, \\boldsymbol {m}, w), \\boldsymbol {q} _ {\\boldsymbol {a}} = D e (\\boldsymbol {q} _ {\\boldsymbol {a}}) \\tag {2} \\\\ \\boldsymbol {e} _ {\\boldsymbol {o}} = \\operatorname {R e c t r i e v a l} (\\boldsymbol {q} _ {\\boldsymbol {a}}, w), \\hat {\\boldsymbol {x}} = U n P (\\boldsymbol {e} _ {\\boldsymbol {o}}), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 609, + 885, + 646 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $Unp(\\cdot)$ denotes the Unpatchify module. $De(\\cdot)$ denotes the transformer decoder.", + "bbox": [ + 496, + 652, + 885, + 683 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Training Loss", + "text_level": 1, + "bbox": [ + 496, + 700, + 627, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similar to previous audio CODECs, our approach is based on a GAN objective, where we optimize both the generator (which consists of the Patchify module, transformer encoder, quantizer, transformer decoder, and UnPatchify module) and the discriminators. For the generator, the training loss comprises four components: (1) reconstruction loss term; (2) adversarial loss term; (3) Masked AutoEncoder (MAE) loss; and (4) AR prediction loss. The reconstruction and adversarial losses typically follow previous works (Défossez et al., 2022; Zeghidour et al., 2021). In the following, we describe the MAE loss and AR prediction loss. More details of training loss refer to Appendix G.", + "bbox": [ + 495, + 724, + 885, + 905 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "2Query tokens are learnable embedding vectors that are updated throughout the training process.", + "bbox": [ + 84, + 863, + 473, + 891 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MAE Loss As we discussed in Section 1, a semantic-rich audio codec tokenizer is better suited for audio language modeling. 
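A minimal sketch of the fixed, semantic-prior codebook described above, covering only the first VQ layer (the residual layers follow standard RVQ and are omitted). Here `speech_centroids` and `sound_centroids` stand in for k-means centroids of Wav2vec2 and BEATs features, each of size C/2, and the straight-through estimator is one common way to keep the quantizer differentiable; this is an illustration under those assumptions rather than the exact implementation.

```python
import torch
import torch.nn as nn

class FrozenPriorVQ(nn.Module):
    """First VQ layer with a fixed semantic-prior codebook: half speech
    centroids, half general-sound centroids, never updated during training."""
    def __init__(self, speech_centroids, sound_centroids, d_in):
        super().__init__()
        codebook = torch.cat([speech_centroids, sound_centroids], dim=0)  # (C, d_prior)
        self.register_buffer("codebook", codebook)        # fixed, not a trainable parameter
        self.proj = nn.Linear(d_in, codebook.size(1))     # map encoder features into prior space

    def forward(self, h):                                  # h: (B, N, d_in) query embeddings
        z = self.proj(h)                                   # (B, N, d_prior)
        dists = torch.cdist(z, self.codebook.unsqueeze(0).expand(z.size(0), -1, -1))
        idx = dists.argmin(dim=-1)                         # nearest fixed centroid per token
        q = self.codebook[idx]                             # quantized features
        q = z + (q - z).detach()                           # straight-through estimator
        return q, idx
```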
Inspired by the success of MAE (He et al., 2022), we propose to incorporate an MAE loss during the training of the audio codec. Specifically, for the frame sequence $e$ , we randomly choose several audio frame features and set these frames to zero, $e_m = \\mathrm{Mask}(e)$ . We pass the masked features $e_m$ into the encoder transformer. Then, the encoded features are passed into an MAE-decoder transformer block to predict $e$ . In our experiments, we adopt a dynamic mask rate (from 0.2 to 0.3), we found that using a large mask rate will significantly influence the reconstruction performance. Following MAE (He et al., 2022), we apply the MSE loss to the masked audio frames.", + "bbox": [ + 84, + 84, + 475, + 296 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "AR Loss As shown in figure 3, we find that the first layer of RVQ-based audio codec models is easier to fit for the audio language model than the other layers (e.g., layers 2 and 3). One possible reason is that the first layer encodes more semantically related information. For speech data, most of the content information can be recovered by the first VQ layer, while the residual layers primarily encode acoustic-level information, which influences speech quality. To make the tokens in the residual layer easier to fit, we introduce an autoregressive (AR) prediction prior (Wang et al., 2024a) in the RVQ latent space. Specifically, we introduce a lightweight continuous autoregressive (AR) transformer3, which is used to conduct next-token prediction in the RVQ layer. For example, it is tasked with predicting the quantized feature of the third VQ layer based on the features of the first and second VQ layers. We use mean squared error (MSE) loss for optimization.", + "bbox": [ + 84, + 303, + 477, + 561 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Two-stage Training Strategy", + "text_level": 1, + "bbox": [ + 84, + 577, + 316, + 593 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Although training the ALMTokenizer using the typical Encoder (Défossez et al., 2022) setting is feasible, we introduce a two-stage training paradigm to improve both reconstruction performance and semantic information. Our motivation stems from the fact that audio codec quantization focuses on modeling local relationships, whereas semantic information focuses on modeling global relationships. These two goals are in conflict. To resolve this conflict, we present a two-stage training strategy. In the first stage, we do not incorporate the quantization part; instead, we train directly an AutoEncoder with Patchify and UnPatchify modules. To encode more semantic information in the Patchify module, we introduce MAE loss during this stage, by adding transformer-based MAE-encoder and decoder. The encoder processes the masked frame sequence, and the decoder pre", + "bbox": [ + 84, + 599, + 475, + 828 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "dicts the masked part. After training, the transformer encoder and decoder are discarded. In the second stage, we first initialize the ALMTokensizer's Patchify and UnPatchify modules with the checkpoint from the first stage, and freeze the parameters of the Patchify module. Then, we train the model using the training loss described in Section 3.3.", + "bbox": [ + 496, + 84, + 885, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 496, + 195, + 629, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. 
Dataset and Training Details", + "text_level": 1, + "bbox": [ + 496, + 220, + 732, + 234 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data preparation for the audio codec ALMTokensizer is trained on approximately 4,500 hours of data. In the speech domain, we utilize LibriTTS training set (Zen et al., 2019) and a subset of Multilingual LibriSpeech (MLS) (Pratap et al., 2020), with 2,000 hours randomly selected. In the sound domain, we utilize a subset of AudioSet, with 1,000 hours randomly selected; in the music domain, we employ a subset of the Million Song Dataset (Bertin-Mahieux et al., 2011), also with 1,000 hours randomly selected. We evaluate the codec's speech reconstruction performance using a subset of the VCTK dataset (Veaux et al., 2017), and assess both audio and music reconstruction performance using the AudioCaps (Kim et al., 2019) validation set and the MusicCaps dataset (Agostinelli et al., 2023), respectively.", + "bbox": [ + 495, + 244, + 887, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data for Audio Language Models To assess the effectiveness of the proposed audio tokenizer, we construct an audio language model framework to perform six audio-related tasks. The details are provided in Appendix D.3 and D.4. For speech data, we select 2,000 hours of speech-text pairs from LibriHeavy (Kang et al., 2024). For sound data, we utilize the AudioCaps training set and BBC Sound Effects. For music data, we use a subset of the Million Song dataset and the caption data from LP-MusicCaps (Doh et al., 2023).", + "bbox": [ + 495, + 463, + 888, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details ALMTokenizer first performs patchification on the audio data, we set the patch size to 320 in all of experiments, which encodes 1 second of $24\\mathrm{kHz}$ audio into 75 frames. For the Encoder-style Patchify module, we adopt the settings from Encodec (Défossez et al., 2022) encoder. To enable streaming for the codec model, a causal convolution layer is employed. For the encoder-transformer and decoder-transformer components, we use 24 self-attention layers, with latent dimensions of 256 and 512, respectively. Following StableCodec (Parker et al., 2024), the self-attention mechanism uses a causal sliding attention window of 64 steps to restrict the receptive field and promote the generalization of the architecture to sequences of arbitrary length. Rotary Positional Embeddings (RoPE) are used. Refer to Appendix G for the details of ALMTokenizer model training. For the audio language model, we follow the framework of Moshi (Défossez et al., 2024). For further details, refer to Appendix A.", + "bbox": [ + 495, + 606, + 888, + 878 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 761, + 70 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3The term continuous autoregressive (AR) transformer is used to distinguish our approach from traditional discrete AR models, which operate on discrete token sequences and are optimized using cross-entropy loss. 
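As an illustration of the AR prediction loss from Section 3.3, the sketch below regresses the quantized feature of each RVQ layer from the layers before it with an MSE objective, operating on continuous features exactly so that gradients can flow. The module name, depth, and width are assumptions, not the paper's configuration.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ContinuousARPrior(nn.Module):
    """Lightweight continuous AR transformer over the RVQ depth axis:
    predicts the quantized feature of layer k from layers < k with MSE."""
    def __init__(self, d=256, n_layers=2, n_heads=4):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d, n_heads, batch_first=True)
        self.net = nn.TransformerEncoder(layer, n_layers)

    def loss(self, q_layers):
        # q_layers: list of per-layer quantized features, each (B, N, d)
        seq = torch.stack(q_layers, dim=2)                 # (B, N, L, d)
        B, N, L, d = seq.shape
        seq = seq.reshape(B * N, L, d)
        causal = torch.triu(torch.ones(L, L, dtype=torch.bool), diagonal=1)
        pred = self.net(seq, mask=causal)                  # causal attention over RVQ layers
        return F.mse_loss(pred[:, :-1], seq[:, 1:])        # predict layer k+1 from layers <= k
```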
In our study, to facilitate gradient backpropagation, we apply the AR transformer directly to continuous features.", + "bbox": [ + 84, + 835, + 475, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/082256a1f928b9b771e0604046246e9c0a0de124d1e9ec2d048c5dcfe42c390e.jpg", + "table_caption": [ + "Table 1. The speech reconstruction and semantic performance comparison between the ALMTokensizer and previous tokenizers. FPS denotes that the frame number in one second. TPS denotes that the token number in one second. CS denotes the codebook size, BR denotes the bit-rate. ST denotes speechtokenizer. Bold for the best result and underline for the second-best result. Evaluation on VCTK dataset." + ], + "table_footnote": [], + "table_body": "
Models | FPS/TPS | CS/BR | Reconstruction | Semantic
UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | ASR (↓) | ER (↑)
Hubert (Hsu et al., 2021) | - | - | - | - | - | - | - | 6.5 | 31.0
WavLM (Chen et al., 2022a) | - | - | - | - | - | - | - | 6.2 | 29.0
Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 2.58 | 3.27 | 3.64 | 0.81 | 2.0 | 35.3 | 26.5
DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 3.13 | 3.41 | 3.67 | 0.81 | 2.1 | 44.1 | 17.6
Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 3.67 | 3.50 | 3.72 | 0.79 | 1.9 | 44.6 | 19.8
StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 4.22 | 3.64 | 3.40 | 0.76 | 1.8 | 98.3 | 15.8
ST (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 3.41 | 3.36 | 3.68 | 0.79 | 1.7 | 19.8 | 27.0
Mimi (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 3.01 | 3.14 | 3.28 | 0.75 | 1.5 | 25.1 | 28.0
Mimi (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 3.65 | 3.38 | 3.82 | 0.82 | 2.1 | 23.8 | 28.3
ALMTokensizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0
", + "bbox": [ + 88, + 146, + 910, + 308 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Evaluation Metrics", + "text_level": 1, + "bbox": [ + 84, + 332, + 253, + 345 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate the performance of previous SOTA audio tokenizers, and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks.", + "bbox": [ + 84, + 356, + 475, + 417 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Audio Reconstruction For speech reconstruction, we use DNS-MOS, UT-MOS, PESQ, STOI (Short-time Objective Intelligibility), and VISQOL. For sound and music data evaluation, VISQOL (audio version), STFT loss, and Mel loss are used. Furthermore, following (Kumar et al., 2023), the MUSHRA subjective test is conducted for speech, sound, and music. Refer to Appendix D for more details.", + "bbox": [ + 84, + 424, + 473, + 530 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Audio Semantic Information Previous SSL models, such as Hubert (Hsu et al., 2021), have shown that semantic-rich representation can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Thus, we can validate the performance of features of the audio tokenizer for downstream recognition tasks. For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset, and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For sound data, we conduct sound classification tasks on the ESC-50 dataset (Piczak, 2015). For music data, we conduct music classification tasks on the Medley-solos-DB dataset (Lostanlen & Cella, 2016).", + "bbox": [ + 84, + 537, + 473, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Audio Understanding To further validate whether the audio tokenizer is suitable for building an audio language model, we propose to conduct an understanding task using discrete tokens. We conduct three tasks: ASR, audio caption, and music caption. For the audio data, we use the audio tokenizer to transform it into discrete tokens, and for text data, we use the BPE tokenizer of LLAMA 3.2. For audio and music caption, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics.", + "bbox": [ + 84, + 741, + 473, + 891 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bc38bc37fde7e41eb41f137a017389aa04332f4882f0f1624d7d8a673e5c16c8.jpg", + "table_caption": [ + "Table 2. The sound reconstruction performance comparison between the proposed ALMTokensizer and previous audio tokenizer models. SC denotes the sound classification task. Evaluation on AudioCaps validation set." + ], + "table_footnote": [], + "table_body": "
Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | SC (↑)
BEATs | - | - | - | 24%
Wav2vec2 | - | - | - | 53%
Encodec | 3.05 | 16.3 | 1.23 | 15%
DAC | 2.98 | 17.6 | 1.24 | 20%
Wavtokenizer | 2.18 | 32.7 | 2.50 | 12%
Ours | 2.99 | 15.0 | 1.24 | 44%
", + "bbox": [ + 501, + 378, + 883, + 473 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b85bef3806853a9e62f3afbd2d853354b00e87704dab5446ce7e4c2bfb7a5fe5.jpg", + "table_caption": [ + "Table 3. The music reconstruction and semantic performance comparison between the ALMTokensizer and previous audio tokenizers. MC denotes the music classification task. Evaluation on Musiccaps dataset." + ], + "table_footnote": [], + "table_body": "
Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | MC (↑)
BEATs | - | - | - | 54%
Wav2vec2 | - | - | - | 65%
Encodec | 4.04 | 34.8 | 1.26 | 45%
DAC | 4.06 | 35.9 | 1.28 | 48%
Wavtokenizer | 3.85 | 48.2 | 1.47 | 54%
Ours | 3.96 | 34.4 | 1.32 | 59%
", + "bbox": [ + 501, + 560, + 885, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Audio Generation We also conduct audio generation tasks, including text-to-speech, text-to-sound, and text-to-music. Refer to Appendix D for more details.", + "bbox": [ + 496, + 684, + 885, + 729 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. The Reconstruction and Semantic Performance", + "text_level": 1, + "bbox": [ + 496, + 746, + 861, + 760 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first compare the reconstruction and semantic performance of ALMTokensizer with previous audio tokenizers. Table 1 presents the speech reconstruction and semantic results. We observe the following: (1) In terms of reconstruction, ALMTokensizer achieves impressive results in the low-bitrate setting. For example, compared with previous SOTA models, MimiCodec and Wavtokenizer, ALMTokensizer achieves better reconstruction performance at a lower bitrate. We also note that StableCodec performs well on UT-", + "bbox": [ + 495, + 768, + 885, + 904 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/13fe390d7e322759f0fba09a33c1158fc9542e63e8aee9389f8bf854e645d46b.jpg", + "table_caption": [ + "Table 4. The LM-based TTS and ASR results. The first three metrics are used for TTS, while the last one is used for ASR. GLM4-Voice (Zeng et al., 2024) is a single layer semantic tokenizer. Evaluation on LibriSpeech test clean set." + ], + "table_footnote": [], + "table_body": "
Models | WER (↓) | DNSMOS (↑) | UT-MOS (↑) | ASR (↓)
GLM4-voice | 9.9 | 3.96 | 3.79 | 16.3 ± 1.5
DAC | 24.5 | 3.14 | 2.06 | 58.4 ± 1.2
Encodec | 22.9 | 3.48 | 2.14 | 77.2 ± 2.3
StableCodec | 22.7 | 3.63 | 3.70 | 28.0 ± 1.9
Wavtokenizer | 18.5 | 3.72 | 3.58 | 45.6 ± 2.7
MimiCodec | 16.0 | 3.67 | 2.93 | 23.1 ± 1.5
Ours | 11.7 | 3.75 | 3.88 | 19.6 ± 1.8
", + "bbox": [ + 88, + 148, + 478, + 253 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "MOS. The main reason is that StableCodec has denoising capabilities, while the original audio includes some noise. This explains why StableCodec achieves good results on UTMOS but performs poorly on PESQ and STOI. (2) In terms of semantic information, ALMTokensizer demonstrates superior performance, e.g., ALMTokensizer outperforms previous SOTA models, such as Wavtokenizer and StableCodec $^{4}$ . Notably, in the emotion classification task, ALMTokensizer achieves performance comparable to previous SSL models, such as Hubert and WavLM. However, we also note that ALMTokensizer still lags behind these SSL models in ASR performance. We speculate that the inclusion of acoustic information may detract from ASR performance, despite ALMTokensizer containing rich semantic information. Table 2 and 3 show the sound and music experimental results. We can see that ALMTokensizer demonstrates strong reconstruction performance under the low-bitrate setting. Compared to WavTokenizer, the reconstruction performance shows significant improvement. Furthermore, we also note that sound and music are inherently more complex than speech, and encoding them at very low-bitrate remains a challenge. In terms of semantic information, ALMTokensizer significantly surpasses previous works, such as WavTokenizer and Encodec. In comparison with SSL models, BEATs (Chen et al., 2022b) and Wav2vec2-audioset version, ALMTokensizer shows comparable performance. We also perform the MUSHRA subjective test for the reconstruction performance. As shown in Table 7, we find that ALMTokensizer effectively maintains strong subjective reconstruction performance on speech, music, and audio, even with a very low-bitrate setting.", + "bbox": [ + 84, + 285, + 475, + 753 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Audio Understanding and Generation Results", + "text_level": 1, + "bbox": [ + 84, + 770, + 437, + 784 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Speech Understanding and Generation Tasks Table 4 shows the LM-based TTS and ASR results. For the TTS task, we mainly focus on robustness and speech quality. In terms of robustness, we can see that the GLM4-voice tokenizer (Zeng et al., 2024), MimiCodec, and the pro", + "bbox": [ + 84, + 792, + 475, + 869 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d14fe6162b3ac4c03543d5fc61d9d14b856032b8180431b1c829610ad257161d.jpg", + "image_caption": [ + "Figure 3. The performance comparison with or without AR loss." + ], + "image_footnote": [], + "bbox": [ + 529, + 84, + 851, + 266 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "posed ALMTokensizer bring better performance than others, highlighting the importance of semantic information for LM-based speech generation. Compared to previous audio codec tokenizers, ALMTokensizer brings significant improvement. In terms of generated speech quality, ALMTokensizer also shows great advantages, further demonstrating that the proposed tokenizer is more suitable for audio language modeling. Similarly, when we conduct the ASR task using discrete tokens as input, semantic information is also important. Traditional audio codec models perform poorly in this setting, such as DAC, Encodec, and WavTokenizer. StableCodec was fine-tuned by using a CTC head to predict the force-aligned phoneme tags from pre-bottleneck latents. MimiCodec distills the semantic information from WavLM. 
Thus, they have better performance than previous codec models. In ALMTokensizer, we propose a novel codec framework and training loss to better encode semantic information in the codec model.", + "bbox": [ + 495, + 329, + 887, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Sound/music Understanding and Generation Results We conduct text-to-sound, text-to-music, audio caption and music caption tasks within the same audio language model framework. The experimental results shown in Table 5 indicate that ALMTokensizer shows better performance in both audio caption and audio generation tasks, further demonstrating its advantages. We put more audio tokenizer reconstruction performance experiments on Appendix F, including evaluation on LibriTTS test set, length generalization, and compared to diffusion-based audio codec models.", + "bbox": [ + 495, + 608, + 888, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 496, + 776, + 635, + 791 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In order to gain a more comprehensive understanding of ALMTokensizer, we systematically compared each key component using a controlled experimental setup, employing identical architectures and hyperparameters across all trials. The Effectiveness of Query-based Audio Compression In this study, we propose a query-based audio compression strategy for compressing audio data in a very low-bitrate", + "bbox": [ + 495, + 799, + 887, + 905 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 761, + 70 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "4StableCodec's feature dimension is 6, it is hard to apply it for down-streaming task by simple fine-tuning", + "bbox": [ + 84, + 878, + 475, + 905 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ef002626f27bb198e207f58573c778f26f9a135585bdd4fb9d3fb8064870efa7.jpg", + "table_caption": [ + "Table 5. The LM-based sound, music understanding and generation. B1, B2, B3, RG, ME, CD, SP, and SD denote BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER, respectively. Evaluation on Audiocaps and Musiccaps datasets." + ], + "table_footnote": [], + "table_body": "
Models | Understanding | Generation
B1 (↑) | B2 (↑) | B3 (↑) | ME (↑) | RG (↑) | CD (↑) | SP (↑) | SD (↑) | FD (↓) | FAD (↓) | KL (↓)
Sound Task
Encodec | 0.25 | 0.15 | 0.08 | 0.11 | 0.24 | 0.57 | 0.14 | 0.35 | 10.03 | 8.22 | 1.73
DAC | 0.26 | 0.15 | 0.08 | 0.11 | 0.26 | 0.51 | 0.13 | 0.32 | 14.14 | 11.7 | 1.55
Wavtokenizer | 0.24 | 0.14 | 0.08 | 0.10 | 0.22 | 0.38 | 0.11 | 0.25 | 6.76 | 4.55 | 1.28
ALMTokensizer (Ours) | 0.28 | 0.17 | 0.11 | 0.12 | 0.24 | 0.60 | 0.15 | 0.37 | 4.11 | 6.16 | 0.55
Music Task
Encodec | 0.30 | 0.14 | 0.08 | 0.11 | 0.23 | 0.37 | 0.09 | 0.23 | 7.22 | 5.48 | 1.06
DAC | 0.29 | 0.14 | 0.08 | 0.11 | 0.23 | 0.37 | 0.09 | 0.23 | 12.89 | 8.36 | 1.68
Wavtokenizer | 0.19 | 0.06 | 0.02 | 0.06 | 0.13 | 0.06 | 0.05 | 0.05 | 4.39 | 11.93 | 0.88
ALMTokensizer (Ours) | 0.34 | 0.15 | 0.07 | 0.13 | 0.25 | 0.44 | 0.10 | 0.27 | 3.55 | 4.58 | 0.43
", + "bbox": [ + 102, + 119, + 869, + 273 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/1867ab2fb144b56a586a497203f019a503a17af3cdbe1769e511357e997096e0.jpg", + "table_caption": [ + "Table 6. Ablation study of codec framework, training loss, and training strategy. ASR and ER are used to evaluate the semantic information. The others are used to evaluate the reconstruction performance. Experiments conduct on VCTK dataset." + ], + "table_footnote": [], + "table_body": "
Setting | UTMOS (↑) | DNSMOS (↑) | VISQOL (↑) | PESQ (↑) | STOI (↑) | ASR (↓) | ER (↑)
ALMTokensizer | 3.76 | 3.64 | 3.78 | 2.0 | 0.81 | 18.3 | 29.0
Framework ablation
w/o the query-based framework | 2.49 | 3.13 | 3.37 | 1.58 | 0.77 | 34.5 | 22.6
w/o Three additional loss | 3.54 | 3.41 | 3.44 | 1.69 | 0.78 | 27.2 | 24.5
Training loss ablation
w/o semantic prior for VQ | 3.79 | 3.66 | 3.78 | 2.12 | 0.83 | 19.2 | 28.4
w/o MAE loss | 3.70 | 3.76 | 3.83 | 2.10 | 0.82 | 24.5 | 23.2
w/o AR loss | 3.72 | 3.81 | 3.80 | 2.08 | 0.82 | 18.8 | 30.2
Different Patchify module
use Linear-Patchify | 3.47 | 3.36 | 3.27 | 1.78 | 0.78 | 20.3 | 26.7
Training strategy ablation
w/o two-stage training | 3.60 | 3.39 | 3.24 | 1.55 | 0.74 | 22.8 | 25.9
", + "bbox": [ + 101, + 324, + 869, + 526 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "setting. To validate its effectiveness, we follow previous audio codec models, such as MimiCodec (Défossez et al., 2024). In the encoder part, we use a stride size of [8, 6, 5, 4, 2] to compress 1-second, $24\\mathrm{kHz}$ audio into $12.5\\mathrm{Hz}$ , followed by applying 3 RVQ layers to quantize it. As shown in Table 6, using previous audio codec frameworks makes it difficult to maintain good reconstruction performance in very low-bitrate settings. As a result, the proposed query-based compression method is more effective in this setting.", + "bbox": [ + 84, + 551, + 475, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The Influence of Semantic Prior for VQ To explore the influence of semantic priors on the audio codec model, we conduct an experiment where we remove the semantic prior and instead train a learnable RVQ following Encodec. As shown in Table 6, we find that updating the RVQ layer improves reconstruction performance but reduces semantic information, demonstrating that integrating semantic priors into the VQ layer enhances semantic information.", + "bbox": [ + 84, + 686, + 473, + 808 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The Influence of MAE Loss We also conduct experiments to evaluate the effectiveness of the MAE loss. As shown in Table 6, we find that the MAE loss is crucial for enhancing the semantic information in the codec model. Although the MAE loss has a slight negative effect on reconstruction, it is a crucial factor in building a better audio tokenizer.", + "bbox": [ + 84, + 809, + 473, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The Influence of AR Loss From Table 6, we observe that adding the AR loss reduces reconstruction performance. In Figure 3, we compare token prediction accuracy and TTS performance with and without LM loss. We observe that using LM loss significantly improves token prediction accuracy, particularly for the second and third VQ layers, which shows the effectiveness of our motivation and solution.", + "bbox": [ + 495, + 551, + 885, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The Influence of Two-stage Training As Table 6 shows, the two-stage training strategy is crucial as it significantly improves reconstruction performance and semantic information in the codec model. The Influence of Patchify Module We investigate two types of Patchify modules: Encode-style and StableCodec-style. As shown in Table 6, using Encode-style Patchify modules yields better performance. One possible reason is that StableCodec-style Patchify modules (Parker et al., 2024) may depend on larger data and model parameters, as the original paper scales their model to 1B. In contrast, we use only four transformer layers to ensure a fair comparison with Encode-style modules. 
Due to page limitations, we defer the ablation study on the influence of window size $w$ in query-based compression, codebook size, the influence of mask-rate, and model size on reconstruction to Appendix C.", + "bbox": [ + 495, + 657, + 885, + 900 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 761, + 70 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3c483e95f4e46d9b474b355afbc8f55a9279df8ed331b640ead0025710bc583e.jpg", + "table_caption": [ + "Table 7. The subjective reconstruction results using MUSHRA (comparative scoring of samples) of codec models on speech, sound and music. Bold for the best result and underline for the second-best result." + ], + "table_footnote": [], + "table_body": "
Models | FPS/TPS | CS/BR | Speech (↑) | Sound (↑) | Music (↑)
Speech
MimiCodec (3 RVQ) (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 65.61 ± 5.2 | - | -
MimiCodec (8 RVQ) (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 86.7 ± 2.3 | - | -
StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 81.7 ± 4.4 | - | -
SpeechTokenizer (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 73.7 ± 4.6 | - | -
Audio
Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 75.1 ± 3.9 | 77.2 ± 4.2 | 73.7 ± 4.6
DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 79.3 ± 4.2 | 71.3 ± 4.1 | 71.3 ± 4.1
Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 84.0 ± 2.1 | 63.1 ± 4.6 | 54.1 ± 5.4
Ours | 12.5/37.5 | 2048/0.41kbps | 84.8 ± 3.7 | 72.4 ± 4.7 | 69.0 ± 4.5
", + "bbox": [ + 129, + 126, + 844, + 292 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.6. Discussion", + "text_level": 1, + "bbox": [ + 84, + 318, + 192, + 332 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we discuss two fundamental questions in audio tokenization. Question 1: Is a single quantization layer better than multiple quantization layers? Question 2: Does a low-bit rate with high reconstruction performance define a good audio tokenizer?", + "bbox": [ + 84, + 342, + 473, + 416 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question 1 Although WavTokenizer and StableCodec demonstrate the potential to build a low-bitrate audio codec tokenizer with a single quantization layer, they rely on a higher frame rate (e.g., 25 or $40\\mathrm{Hz}$ ). As shown in Figure 1, a lower frame rate (e.g., $12.5\\mathrm{Hz}$ ) is critical for improving training efficiency. Thanks to UniAudio (Yang et al., 2023c) and Moshi's (Défossez et al., 2024) audio language model framework, multiple quantization layers do not increase the sequence length. Therefore, multiple quantization layers present an effective approach for building a low-bitrate, semantically rich audio codec.", + "bbox": [ + 84, + 417, + 475, + 583 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question 2 To address this question, we present two comparisons. First, as shown in Tables 4 and 1, StableCodec exhibits better reconstruction performance and a lower bit-rate compared to WavTokenizer. However, when applied to the text-to-speech generation task, WavTokenizer demonstrates better robustness. One possible reason for this is that StableCodec uses a large-scale codebook size (46,656), which may increase the modeling complexity. Second, although MimiCodec has a higher bit-rate and poorer reconstruction performance than StableCodec, it demonstrates more stable TTS generation performance and better ASR performance. This phenomenon further underscores the importance of semantic information. In summary, a good audio tokenizer for an audio language model should not only consider low-bitrate and reconstruction, but also account for the semantic information in the codec model.", + "bbox": [ + 84, + 584, + 477, + 824 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 84, + 844, + 205, + 859 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this study, we present a low-bitrate, semantically rich audio codec tokenizer. Specifically, we propose a query-based", + "bbox": [ + 84, + 869, + 475, + 902 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "compression strategy to effectively compress the audio data into a low-bitrate format while incorporating more semantic information. Furthermore, we introduce several training losses to enhance semantic information, including MAE loss and AR loss. Extensive experiments demonstrate the effectiveness of ALMTokensizer. Within the same audio language modeling framework, ALMTokensizer exhibits superior performance in both understanding and generation tasks. 
We discuss the limitation of this study in Appendix I.", + "bbox": [ + 495, + 318, + 887, + 455 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ethical Statement", + "text_level": 1, + "bbox": [ + 496, + 473, + 653, + 489 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This paper presents an audio tokenizer for audio language models, which can be applied to various audio generation tasks, such as text-to-speech and text-to-music. There is potential for misuse in generating misinformation, deepfake audio, or other harmful content. We advocate for the development of a detection model to identify audio produced by the codec model and generated by other generative models.", + "bbox": [ + 495, + 500, + 888, + 604 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 496, + 625, + 594, + 641 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Agostinelli, A., Denk, T. I., Borsos, Z., Engel, J., Verzetti, M., Caillon, A., Huang, Q., Jansen, A., Roberts, A., Tagliasacchi, M., et al. Musicl: Generating music from text. arXiv preprint arXiv:2301.11325, 2023.", + "Ai, Y., Jiang, X.-H., Lu, Y.-X., Du, H.-P., and Ling, Z.-H. Apocodec: A neural audio codec with parallel amplitude and phase spectrum encoding and decoding. arXiv preprint arXiv:2402.10533, 2024.", + "Baevski, A., Zhou, Y., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in neural information processing systems, 33:12449-12460, 2020.", + "Banerjee, A. and Arora, V. wav2tok: Deep sequence tokenizer for audio retrieval. In The Eleventh International Conference on Learning Representations, 2022." + ], + "bbox": [ + 496, + 648, + 888, + 905 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Bertin-Mahieux, T., Ellis, D. P., Whitman, B., and Lamere, P. The million song dataset. In Proceedings of the 12th International Conference on Music Information Retrieval (ISMIR 2011), 2011.", + "Borsos, Z., Marinier, R., Vincent, D., Kharitonov, E., Pietquin, O., Sharifi, M., Roblek, D., Teboul, O., Grangier, D., Tagliasacchi, M., et al. Audiolm: a language modeling approach to audio generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023a.", + "Borsos, Z., Sharifi, M., Vincent, D., Kharitonov, E., Zeghidour, N., and Tagliasacchi, M. Soundstorm: Efficient parallel audio generation. arXiv preprint arXiv:2305.09636, 2023b.", + "Chen, S., Wang, C., Chen, Z., Wu, Y., Liu, S., Chen, Z., Li, J., Kanda, N., Yoshioka, T., Xiao, X., et al. Wavlm: Large-scale self-supervised pre-training for full stack speech processing. IEEE Journal of Selected Topics in Signal Processing, 16(6):1505-1518, 2022a.", + "Chen, S., Wu, Y., Wang, C., Liu, S., Tompkins, D., Chen, Z., and Wei, F. Beats: Audio pre-training with acoustic tokenizers. arXiv preprint arXiv:2212.09058, 2022b.", + "Copet, J., Kreuk, F., Gat, I., Remez, T., Kant, D., Synnaeve, G., Adi, Y., and Defossez, A. Simple and controllable music generation. arXiv preprint arXiv:2306.05284, 2023.", + "Costantini, G., Iaderola, I., Paoloni, A., Todisco, M., et al. 
Emovo corpus: an italian emotional speech database. In Proceedings of the ninth international conference on language resources and evaluation (LREC'14), pp. 3501-3504. European Language Resources Association (ELRA), 2014.", + "Défossez, A., Copet, J., Synnaeve, G., and Adi, Y. High fidelity neural audio compression. arXiv preprint arXiv:2210.13438, 2022.", + "Défossez, A., Mazaré, L., Orsini, M., Royer, A., Pérez, P., Jégou, H., Grave, E., and Zeghidour, N. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024.", + "Doh, S., Choi, K., Lee, J., and Nam, J. Lp-musiccaps: Llm-based pseudo music captioning. arXiv preprint arXiv:2307.16372, 2023.", + "Drossos, K., Lipping, S., and Virtanen, T. Clotho: An audio captioning dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 736-740. IEEE, 2020.", + "Du, Z., Chen, Q., Zhang, S., Hu, K., Lu, H., Yang, Y., Hu, H., Zheng, S., Gu, Y., Ma, Z., et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer" + ], + "bbox": [ + 86, + 84, + 475, + 906 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024.", + "Hao, H., Zhou, L., Liu, S., Li, J., Hu, S., Wang, R., and Wei, F. Boosting large language model for speech synthesis: An empirical study. arXiv preprint arXiv:2401.00246, 2023.", + "He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 16000-16009, 2022.", + "Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020.", + "Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021.", + "Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., and Chen, W. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021.", + "Huang, P.-Y., Xu, H., Li, J., Baevski, A., Auli, M., Galuba, W., Metze, F., and Feichtenhofer, C. Masked autoencoders that listen. Advances in Neural Information Processing Systems, 35:28708-28720, 2022.", + "Ji, S., Jiang, Z., Wang, W., Chen, Y., Fang, M., Zuo, J., Yang, Q., Cheng, X., Wang, Z., Li, R., et al. Wavtokenizer: an efficient acoustic discrete codec tokenizer for audio language modeling. arXiv preprint arXiv:2408.16532, 2024.", + "Ju, Z., Wang, Y., Shen, K., Tan, X., Xin, D., Yang, D., Liu, Y., Leng, Y., Song, K., Tang, S., et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. arXiv preprint arXiv:2403.03100, 2024.", + "Kang, W., Yang, X., Yao, Z., Kuang, F., Yang, Y., Guo, L., Lin, L., and Povey, D. Libriheavy: a 50,000 hours asr corpus with punctuation casing and context. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 10991-10995. IEEE, 2024.", + "Kharitonov, E., Vincent, D., Borsos, Z., Marinier, R., Girgin, S., Pietquin, O., Sharifi, M., Tagliasacchi, M., and Zeghidour, N. Speak, read and prompt: High-fidelity text-to-speech with minimal supervision. arXiv preprint arXiv:2302.03540, 2023." 
+ ], + "bbox": [ + 500, + 84, + 885, + 905 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 71 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kim, C. D., Kim, B., Lee, H., and Kim, G. Audiocaps: Generating captions for audios in the wild. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 119-132, 2019.", + "Kreuk, F., Synnaeve, G., Polyak, A., Singer, U., Défossez, A., Copet, J., Parikh, D., Taigman, Y., and Adi, Y. Audiogen: Textually guided audio generation. arXiv preprint arXiv:2209.15352, 2022.", + "Kumar, R., Seetharaman, P., Luebs, A., Kumar, I., and Kumar, K. High-fidelity audio compression with improved RVQGAN. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=qjnl1QUUnFA.", + "La Quatra, M., Koudounas, A., Vaiani, L., Baralis, E., Cagliero, L., Garza, P., and Siniscalchi, S. M. Benchmarking representations for speech, music, and acoustic events. In 2024 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICAS-SPW), pp. 505-509, 2024. doi: 10.1109/ICASSPW62465.2024.10625960.", + "Li, H., Xue, L., Guo, H., Zhu, X., Lv, Y., Xie, L., Chen, Y., Yin, H., and Li, Z. Single-codec: Single-codebook speech codec towards high-performance speech generation. arXiv preprint arXiv:2406.07422, 2024.", + "Li, J., Li, D., Savarese, S., and Hoi, S. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730-19742. PMLR, 2023.", + "Lipman, Y., Chen, R. T., Ben-Hamu, H., Nickel, M., and Le, M. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022.", + "Liu, H., Xu, X., Yuan, Y., Wu, M., Wang, W., and Plumbley, M. D. Semanticodec: An ultra low bitrate semantic audio codec for general sound. arXiv preprint arXiv:2405.00233, 2024.", + "Lostanlen, V. and Cella, C.-E. Deep convolutional networks on the pitch spiral for musical instrument recognition. arXiv preprint arXiv:1605.06644, 2016.", + "Mei, X., Meng, C., Liu, H., Kong, Q., Ko, T., Zhao, C., Plumbley, M. D., Zou, Y., and Wang, W. Wavcaps: A chatgpt-assisted weakly-labelled audio captioning dataset for audio-language multimodal research. arXiv preprint arXiv:2303.17395, 2023.", + "Nguyen, T. A., Muller, B., Yu, B., Costa-Jussa, M. R., Elbayad, M., Popuri, S., Ropers, C., Duquenne, P.-A., Algayres, R., Mavlyutov, R., et al. Spirit-lm: Interleaved" + ], + "bbox": [ + 86, + 84, + 478, + 906 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "spoken and written language model. Transactions of the Association for Computational Linguistics, 13:30-52, 2025.", + "OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2204.06125, 2023.", + "Panayotov, V., Chen, G., Povey, D., and Khudanpur, S. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015.", + "Parker, J. D., Smirnov, A., Pons, J., Carr, C., Zukowski, Z., Evans, Z., and Liu, X. 
Scaling transformers for low-bitrate high-quality speech coding. arXiv preprint arXiv:2411.19842, 2024.", + "Piczak, K. J. Esc: Dataset for environmental sound classification. In Proceedings of the 23rd ACM international conference on Multimedia, pp. 1015-1018, 2015.", + "Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. arXiv preprint arXiv:2012.03411, 2020.", + "Reddy, C. K., Gopal, V., and Cutler, R. Dnsmos p. 835: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 886-890. IEEE, 2022.", + "Saeki, T., Xin, D., Nakata, W., Koriyama, T., Takamichi, S., and Saruwatari, H. Utmos: Utokyo-sarulab system for voicemos challenge 2022. arXiv preprint arXiv:2204.02152, 2022.", + "Siuzdak, H. Vocos: Closing the gap between time-domain and fourier-based neural vocoders for high-quality audio synthesis. arXiv preprint arXiv:2306.00814, 2023.", + "Tang, C., Yu, W., Sun, G., Chen, X., Tan, T., Li, W., Lu, L., MA, Z., and Zhang, C. SALMONN: Towards generic hearing abilities for large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=14rn7HpKVk.", + "van Niekerk, B., Zäïdi, J., Carbonneau, M.-A., and Kamper, H. Spoken-term discovery using discrete speech units. arXiv preprint arXiv:2408.14390, 2024.", + "Veaux, C., Yamagishi, J., MacDonald, K., et al. Cstr vctk corpus: English multi-speaker corpus for cstr voice cloning toolkit. University of Edinburgh. The Centre for Speech Technology Research (CSTR), 6:15, 2017." + ], + "bbox": [ + 500, + 84, + 888, + 906 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 71 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wang, C., Chen, S., Wu, Y., Zhang, Z., Zhou, L., Liu, S., Chen, Z., Liu, Y., Wang, H., Li, J., et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023.", + "Wang, H., Suri, S., Ren, Y., Chen, H., and Shrivastava, A. Larp: Tokenizing videos with a learned autoregressive generative prior. arXiv preprint arXiv:2410.21264, 2024a.", + "Wang, Y., Chen, H., Yang, D., Yu, J., Weng, C., Wu, Z., and Meng, H. Consistent and relevant: Rethink the query embedding in general sound separation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 961-965. IEEE, 2024b.", + "Wang, Y., Chen, H., Yang, D., Li, W., Luo, D., Li, G., Yang, S., Wu, Z., Meng, H., and Wu, X. Unisep: Universal target audio separation with language models at scale. arXiv preprint arXiv:2503.23762, 2025.", + "Wu, H., Kanda, N., Eskimez, S. E., and Li, J. Ts3-codec: Transformer-based simple streaming single codec. arXiv preprint arXiv:2411.18803, 2024.", + "Yang, D., Liu, S., Huang, R., Lei, G., Weng, C., Meng, H., and Yu, D. Instructts: Modelling expressive tts in discrete latent space with natural language style prompt. arXiv preprint arXiv:2301.13662, 2023a.", + "Yang, D., Liu, S., Huang, R., Tian, J., Weng, C., and Zou, Y. Hifi-codec: Group-residual vector quantization for high fidelity audio codec. 
arXiv preprint arXiv:2305.02765, 2023b.", + "Yang, D., Tian, J., Tan, X., Huang, R., Liu, S., Chang, X., Shi, J., Zhao, S., Bian, J., Wu, X., et al. Uniaudio: An audio foundation model toward universal audio generation. arXiv preprint arXiv:2310.00704, 2023c.", + "Yang, D., Yu, J., Wang, H., Wang, W., Weng, C., Zou, Y., and Yu, D. Diffsound: Discrete diffusion model for text-to-sound generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023d.", + "Yang, D., Guo, H., Wang, Y., Huang, R., Li, X., Tan, X., Wu, X., and Meng, H. Uniaudio 1.5: Large language model-driven audio codec is a few-shot audio task learner. arXiv preprint arXiv:2406.10056, 2024a.", + "Yang, D., Huang, R., Wang, Y., Guo, H., Chong, D., Liu, S., Wu, X., and Meng, H. Simplespeech 2: Towards simple and efficient text-to-speech with flow-based scalar latent transformer diffusion models. arXiv preprint arXiv:2408.13893, 2024b." + ], + "bbox": [ + 86, + 84, + 475, + 904 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yang, D., Wang, D., Guo, H., Chen, X., Wu, X., and Meng, H. Simplespeech: Towards simple and efficient text-to-speech with scalar latent transformer diffusion models. arXiv preprint arXiv:2406.02328, 2024c.", + "Yang, S.-w., Chi, P.-H., Chuang, Y.-S., Lai, C.-I. J., Lakhotia, K., Lin, Y. Y., Liu, A. T., Shi, J., Chang, X., Lin, G.-T., et al. Superb: Speech processing universal performance benchmark. arXiv preprint arXiv:2105.01051, 2021.", + "Yu, Q., Weber, M., Deng, X., Shen, X., Cremers, D., and Chen, L.-C. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024.", + "Zeghidour, N., Luebs, A., Omran, A., Skoglund, J., and Tagliasacchi, M. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30:495-507, 2021.", + "Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019.", + "Zeng, A., Du, Z., Liu, M., Wang, K., Jiang, S., Zhao, L., Dong, Y., and Tang, J. Glm-4-voice: Towards intelligent and human-like end-to-end spoken chatbot. arXiv preprint arXiv:2412.02612, 2024.", + "Zhang, X., Zhang, D., Li, S., Zhou, Y., and Qiu, X. Speechtokenizer: Unified speech tokenizer for speech large language models. arXiv preprint arXiv:2308.16692, 2023.", + "Zhu, L., Wei, F., Lu, Y., and Chen, D. Scaling the codebook size of vqgan to 100,000 with a utilization rate of $99\\%$ . arXiv preprint arXiv:2406.11837, 2024." + ], + "bbox": [ + 500, + 84, + 885, + 607 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 71 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/1c1b8a8c84d828412e41619aab208a4b72ed13470a39bdd8ec78692ea2ef9051.jpg", + "image_caption": [ + "Figure 4. The left diagram illustrates the framework of the audio language model, which includes a pre-trained LLM, a LoRA module, and a depth transformer. The audio language model can process both text and audio streaming inputs and generate corresponding text and audio outputs. The right diagram provides details of hierarchical audio modeling." 
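To make the hierarchical audio modeling in Figure 4 concrete, the sketch below shows a greedy depth-transformer step that predicts the three RVQ codes of one frame from the backbone model's hidden state for that frame. The `DepthTransformer` name, layer count, and widths are illustrative assumptions and do not reproduce the exact configuration given in Appendix A.

```python
import torch
import torch.nn as nn

class DepthTransformer(nn.Module):
    """Sketch of hierarchical audio modeling: for each audio frame, a small
    transformer predicts the RVQ codes one layer at a time, conditioned on the
    backbone's hidden state and the codes already generated for that frame."""
    def __init__(self, d=512, n_rvq=3, codebook_size=2048, n_layers=2, n_heads=8):
        super().__init__()
        self.embed = nn.ModuleList(nn.Embedding(codebook_size, d) for _ in range(n_rvq))
        layer = nn.TransformerEncoderLayer(d, n_heads, batch_first=True)
        self.net = nn.TransformerEncoder(layer, n_layers)
        self.heads = nn.ModuleList(nn.Linear(d, codebook_size) for _ in range(n_rvq))
        self.n_rvq = n_rvq

    @torch.no_grad()
    def generate(self, ctx):                 # ctx: (B, d) backbone state for the current frame
        seq, codes = ctx.unsqueeze(1), []
        for k in range(self.n_rvq):
            h = self.net(seq)                          # attend over ctx and codes emitted so far
            code = self.heads[k](h[:, -1]).argmax(-1)  # greedy pick for RVQ layer k
            codes.append(code)
            seq = torch.cat([seq, self.embed[k](code).unsqueeze(1)], dim=1)
        return torch.stack(codes, dim=-1)              # (B, n_rvq) codes for this frame
```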
+ ], + "image_footnote": [], + "bbox": [ + 155, + 84, + 480, + 383 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/6c46a09a51c2d1af91d3429bf4ad54706550bf382847ad3d6090b3a56ed70075.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 80, + 823, + 391 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. The details of audio language model framework", + "text_level": 1, + "bbox": [ + 84, + 512, + 514, + 530 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we provide details of the audio language model. We follow the framework of UniAudio (Yang et al., 2023c) and Moshi (Défossez et al., 2024), which combines a pre-trained LLM with a smaller Transformer model to predict audio tokens in a hierarchical manner. In their original paper, both the LLM and the small Transformer are updated during the training process. Due to resource limitations, and following (Hao et al., 2023), we incorporate LoRA (Hu et al., 2021) into the LLM model. For the LLM model, we use the LLAMA3.2 1B version. During training, we update only the LoRA module and the small Transformer.", + "bbox": [ + 84, + 537, + 888, + 628 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "LORA setting For the LoRA module, we add LoRA parameters to the self-attention and linear layers. We set $lora_{r} = 32$ and $lora_{alpha} = 16$ .", + "bbox": [ + 84, + 636, + 887, + 667 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Depth Transformer setting For the depth transformer, we use 6 self-attention layer. We set the attention head number as 32. The attention dimension is the same as the LLAMA 3.2 1B.", + "bbox": [ + 84, + 674, + 887, + 703 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B. The details of the influence of bitrate and semantic information for audio language model.", + "text_level": 1, + "bbox": [ + 84, + 724, + 862, + 741 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we provide details of the validation experiments to explore the influence of bitrate and semantic information on audio language models. Following AudioLM (Borsos et al., 2023a), we construct an audio token pre-training task similar to text pre-training, where the model is tasked with predicting the next audio token based on the previous token sequence.", + "bbox": [ + 84, + 750, + 887, + 797 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.1. Training data", + "text_level": 1, + "bbox": [ + 86, + 811, + 218, + 827 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We conduct the experiments on 2000 hours speech data, these data is selected from MLS dataset (Pratap et al., 2020).", + "bbox": [ + 84, + 835, + 854, + 851 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B.2. Test data", + "text_level": 1, + "bbox": [ + 86, + 867, + 186, + 881 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We evaluate on LibriSpeech test clean set.", + "bbox": [ + 84, + 890, + 364, + 905 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 764, + 70 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/ebeb2a8c7b24539dd827c8b2160b82d162ceb27058d6262bf860960f2abb7cb7.jpg", + "table_caption": [ + "Table 8. 
The reconstruction performance of different frame rate of audio tokenizers." + ], + "table_footnote": [], + "table_body": "
Version | Bitrate (↓) | FPS (↓) | Codebook size | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑)
50 Hz | 1650 bps | 50 | 2048 | 2.22 | 3.69 | 3.63 | 0.86
25 Hz | 825 bps | 25 | 2048 | 2.07 | 3.56 | 3.61 | 0.83
12.5 Hz | 412.5 bps | 12.5 | 2048 | 1.58 | 2.49 | 3.37 | 0.77
", + "bbox": [ + 161, + 106, + 810, + 172 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.3. Framework", + "text_level": 1, + "bbox": [ + 84, + 292, + 205, + 305 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We use the same framework as described in Section A; the difference is that we do not use text streaming.", + "bbox": [ + 84, + 316, + 777, + 330 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.4. Three Types of Audio Tokenizers", + "text_level": 1, + "bbox": [ + 84, + 347, + 349, + 363 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Following the structure of MimiCodec (Défossez et al., 2024), we train three versions of the audio codec tokenizer. All of the audio codec models are trained on $24\\mathrm{kHz}$ speech data. We train three versions of the audio codec models, as follows:", + "bbox": [ + 84, + 371, + 887, + 402 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(V1) We set the down-sampling rate to [2, 5, 6, 8], resulting in a $50\\mathrm{Hz}$ frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 1.65 kbps.", + "(V2) We set the down-sampling rate to [4, 5, 6, 8], resulting in a $25\\mathrm{Hz}$ frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 825 bps.", + "(V3) We set the down-sampling rate to [2, 4, 5, 6, 8], resulting in a $12.5\\mathrm{Hz}$ frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 412.5 bps." + ], + "bbox": [ + 84, + 407, + 885, + 516 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Note that the original MimiCodec is trained with distillation loss from WavLM; we do not add this loss during the training of our audio tokenizer. Therefore, these three audio tokenizers do not include any semantic information. Table 8 shows the reconstruction performance of the three audio tokenizers.", + "bbox": [ + 84, + 521, + 887, + 568 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.5. Semantic Tokenizer", + "text_level": 1, + "bbox": [ + 84, + 584, + 259, + 598 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The previous three audio codec tokenizers do not consider semantic information. To evaluate the importance of semantic information, we follow WhisperSpeech5 to build a Whisper-based semantic tokenizer. Specifically, we follow the training code of WhisperSpeech, using two down-sampling layers to compress the Whisper encoder's features into a $12.5\\mathrm{Hz}$ frame rate, and then we add three RVQ layers to quantize them. Thus, this semantic tokenizer has the same bitrate as the V3 audio tokenizer.", + "bbox": [ + 84, + 607, + 887, + 681 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B.6. Evaluation metrics", + "text_level": 1, + "bbox": [ + 84, + 699, + 254, + 713 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We evaluate the pre-training performance from the following aspects:", + "bbox": [ + 84, + 723, + 540, + 739 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Training efficiency: As is well known, the space complexity of a transformer is $O(T^2)$ , where $T$ is the sequence length. A low-bitrate audio tokenizer can compress the audio signal into a few token sequences, thereby improving training efficiency. 
For all experiments, we use the same GPU machine to train the model and record the statistical training duration.", + "bbox": [ + 84, + 744, + 887, + 792 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Inference efficiency: Similarly, a low-bitrate audio tokenizer can improve inference efficiency, as it requires fewer inference steps. We use the Real-Time Factor (RTF) to assess inference efficiency. Note that for all experiments, we do not use any inference optimization tricks, such as KV cache.", + "bbox": [ + 84, + 797, + 887, + 844 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Validation loss and perplexity: Following text LLMs (OpenAI, 2023), we use validation loss and perplexity to evaluate model performance.", + "bbox": [ + 84, + 849, + 887, + 882 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "5https://github.com/WhisperSpeech/WhisperSpeech", + "bbox": [ + 104, + 890, + 415, + 906 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/d94fab77d52195e7058b5d482d8f7f1f1f1533b9c9ca7a8d3dd5363564b5f2ed.jpg", + "image_caption": [ + "Figure 5. The performance comparison with different window size during inference." + ], + "image_footnote": [], + "bbox": [ + 272, + 87, + 702, + 354 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/17f3ab99fd1a09e4e9beee635e4fd0043664ff45bf148bb33396b5a7d9c15ef7.jpg", + "table_caption": [ + "Table 9. The influence of codebook size for reconstruction performance." + ], + "table_footnote": [], + "table_body": "
Codebook Size | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | STFT loss (↓) | Token utilization (↑)
2048 | 2.0 | 3.76 | 3.78 | 0.81 | 1.20 | 100%
1024 | 1.83 | 3.66 | 3.65 | 0.80 | 1.14 | 100%
512 | 1.69 | 3.64 | 3.58 | 0.792 | 1.18 | 100%
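The "Token utilization" column reports the fraction of codebook entries that are selected at least once on the evaluation data. A minimal sketch of that measurement, assuming the quantizer exposes integer code indices (the exact bookkeeping behind the table may differ), is:

```python
import torch

def token_utilization(code_indices: torch.Tensor, codebook_size: int) -> float:
    """Fraction of codebook entries used at least once.

    code_indices: integer tensor of arbitrary shape with values in [0, codebook_size).
    """
    used = torch.unique(code_indices.reshape(-1))
    return used.numel() / codebook_size

codes = torch.randint(0, 2048, (16, 3, 75))  # e.g. batch x RVQ layers x frames
print(f"{token_utilization(codes, 2048):.1%}")
```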
", + "bbox": [ + 140, + 436, + 831, + 502 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C. Ablation study", + "text_level": 1, + "bbox": [ + 84, + 534, + 238, + 551 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.1. The influence of window size for ALMTokenizer", + "text_level": 1, + "bbox": [ + 84, + 560, + 459, + 574 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As discussed in the previous section, the proposed ALMTokensizer supports a dynamic compression rate by changing the window size $w$ . Figure 5 shows the comparison of reconstruction performance with different window sizes. We observe that using a smaller window size results in better reconstruction performance, but it also increases the bitrate. For example, if the window size is 2, the bitrate is 1237.5bps, window size is 6, the bitrate is 412.5. It also shows the advantages of proposed method: we can dynamically change the frame rate during the inference by setting different window size.", + "bbox": [ + 84, + 583, + 887, + 659 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.2. The influence of codebook size", + "text_level": 1, + "bbox": [ + 84, + 676, + 334, + 690 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We explore three different codebook sizes: 512, 1024, and 2048. To align with the setting of MimiCodec (Défossez et al., 2024), we set the max codebook size as 2048. The results, as shown in Table 9, are presented. We observe that scaling the codebook size improves reconstruction performance. Furthermore, we also find that almost all tokens have been used.", + "bbox": [ + 84, + 699, + 888, + 744 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C.3. The influence of model size for reconstruction performance", + "text_level": 1, + "bbox": [ + 84, + 761, + 532, + 776 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To explore the influence of model size on reconstruction performance, we set up two configurations: (1) We use 24 self-attention layers for both the transformer encoder and transformer decoder, resulting in 174M parameters. (2) We use 12 self-attention layers for both the transformer encoder and transformer decoder, resulting in 87M parameters. In both settings, we keep the Patchify module the same size, as it consists of several convolutional layers, and its total parameters are small. The experimental results, as shown in Table 10, indicate that using a larger model can improve reconstruction but also increases computational resource consumption (higher RTF). Previous work, StableCodec (Parker et al., 2024), shows that scaling the codec model to 1B parameters can lead to better performance. Due to computational resource limitations, we leave scaling to a larger model size for future work.", + "bbox": [ + 84, + 785, + 888, + 905 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/65e54bea32de06f51d9c5ce9da1e1a2189806386fd5b23c8a2f36505d83d130b.jpg", + "table_caption": [ + "Table 10. The influence of model for reconstruction performance." + ], + "table_footnote": [], + "table_body": "
Setting | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | Model size (M) (↓) | RTF (↓)
24 attention layers | 2.0 | 3.76 | 3.78 | 0.81 | 174 | 0.031
12 attention layers | 1.87 | 3.57 | 3.70 | 0.79 | 87 | 0.019
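RTF in the efficiency column is understood as wall-clock processing time divided by the duration of the audio being processed (lower is better), as introduced in Section B.6. A rough measurement sketch, assuming a `codec` object with `encode`/`decode` methods (hypothetical names, not a released API), is:

```python
import time
import torch

@torch.no_grad()
def real_time_factor(codec, waveform: torch.Tensor, sample_rate: int = 24000) -> float:
    """Wall-clock time to re-synthesize the signal, divided by its duration."""
    start = time.perf_counter()
    codes = codec.encode(waveform)   # hypothetical API
    recon = codec.decode(codes)      # hypothetical API
    elapsed = time.perf_counter() - start
    duration = waveform.shape[-1] / sample_rate
    return elapsed / duration
```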
", + "bbox": [ + 171, + 106, + 802, + 160 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.4. The influence of mask-rate in MAE loss", + "text_level": 1, + "bbox": [ + 84, + 183, + 398, + 196 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Inspired by MAE(He et al., 2022), we tested three groups of mask rates ranges: (10–20%), (20–30%), and (30–40%). The experiments as following Table shows. Results indicate that higher rates (30–40%) benefit semantics but harm reconstruction, leading us to adopt an intermediate range (20–30%).", + "bbox": [ + 83, + 205, + 887, + 252 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/105b727952b5ca74e95cd96f0079e5c39919e423b44aa967df89a083ef0b942e.jpg", + "table_caption": [ + "Table 11. The influence of mask-rate for MAE loss." + ], + "table_footnote": [], + "table_body": "
Mask rate range | UTMOS | DNSMOS | VISQOL | PESQ | STOI | ASR | ER
10-20% | 3.77 | 3.62 | 3.80 | 2.0 | 0.81 | 18.7 | 27.7
20-30% | 3.76 | 3.64 | 3.78 | 2.0 | 0.81 | 18.3 | 29.0
30-40% | 3.36 | 3.06 | 3.31 | 1.58 | 0.77 | 18.1 | 29.6
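For orientation, MAE-style masking with a rate drawn from one of these ranges can be sketched as below. This is a simplified stand-in for the masking used with the MAE loss: here masked frame embeddings are simply zeroed, whereas a learned mask embedding could equally be substituted.

```python
import random
import torch

def apply_random_mask(frames: torch.Tensor, rate_range=(0.2, 0.3)):
    """Mask a random subset of frame embeddings (B, T, D) by zeroing them.

    Returns the masked frames and the boolean mask, so an L1 reconstruction
    loss can be computed on the masked positions.
    """
    B, T, _ = frames.shape
    rate = random.uniform(*rate_range)
    mask = torch.rand(B, T, device=frames.device) < rate   # True = masked
    masked = frames.masked_fill(mask.unsqueeze(-1), 0.0)
    return masked, mask

frames = torch.randn(2, 75, 512)
masked_frames, mask = apply_random_mask(frames, (0.2, 0.3))
```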
", + "bbox": [ + 192, + 286, + 777, + 351 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D. Evaluation", + "text_level": 1, + "bbox": [ + 84, + 383, + 205, + 398 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We evaluate the performance of the previous SOTA audio tokenizers and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks.", + "bbox": [ + 84, + 410, + 885, + 440 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D.1. Audio Reconstruction", + "text_level": 1, + "bbox": [ + 84, + 455, + 274, + 469 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For speech data, we use DNS-MOS (Reddy et al., 2022), UT-MOS (Saeki et al., 2022), PESQ, STOI (Short-Time Objective Intelligibility), VISQOL (speech version), and STFT loss as metrics.", + "bbox": [ + 84, + 479, + 885, + 510 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For sound and music data, we use VISQOL (audio version), STFT loss, and Mel loss. Furthermore, following (Kumar et al., 2023), we conduct the MUSHRA subjective test for speech, sound, and music. Specifically, we hire 10 audio-related researchers to conduct the MOS evaluation. We ask the listeners to rate each audio, with scores ranging from 0 to 100. Refer to D.5 for the details.", + "bbox": [ + 84, + 518, + 887, + 578 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Evaluation Datasets: For speech data, we evaluate on a subset of VCTK (Veaux et al., 2017) (200 speech utterances) and a subset of the LibriTTS test clean set (Zen et al., 2019) (400 speech utterances). For sound data, we evaluate on a subset of the AudioCaps validation set (Kim et al., 2019) (200 sound utterances). For music data, we evaluate on a subset of the MusicCaps (Agostinelli et al., 2023) dataset (200 music utterances).", + "bbox": [ + 84, + 585, + 887, + 647 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D.2. Audio Semantic Information", + "text_level": 1, + "bbox": [ + 84, + 662, + 321, + 676 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Previous SSL models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a), have shown that semantic-rich representations can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Inspired by these works, we propose evaluating the performance of the audio tokenizer for downstream recognition tasks. We use the quantized features of the audio tokenizer as the input for downstream tasks. We follow two popular benchmarks: SUPERB (Yang et al., 2021) and ARCH (La Quatra et al., 2024).", + "bbox": [ + 84, + 686, + 887, + 763 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For the ASR task, we train on the LibriSpeech train-100 set and evaluate on the LibriSpeech test clean set. For the EC task, we follow ARCH (La Quatra et al., 2024) to split the training and test sets.", + "bbox": [ + 84, + 768, + 887, + 830 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For sound data, we conduct the sound classification task on the ESC-50 dataset (Piczak, 2015). For music data, we conduct the music classification task on the Medley-Solos-DB dataset (Lostanlen & Cella, 2016). 
For both tasks, we follow the ARCH benchmarking settings to split the training and test sets.", + "bbox": [ + 84, + 837, + 887, + 883 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For all experiments, we train for 10 epochs with the same learning rate and batch size. For the automatic speech recognition", + "bbox": [ + 84, + 890, + 887, + 906 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "task, we use word error rate (WER) as the metric. For the other classification tasks, we use accuracy as the metric.", + "bbox": [ + 84, + 85, + 833, + 99 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.3. LM-based Audio Understanding", + "text_level": 1, + "bbox": [ + 84, + 116, + 349, + 132 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Overview To further validate whether the audio tokenizer is suitable for building an audio language model, we propose conducting an audio understanding task using discrete tokens as input. We conduct three tasks: automatic speech recognition (ASR), audio captioning, and music captioning. We use the framework introduced in Section A. For audio data, we use the audio tokenizer to encode it as discrete tokens; for text data, we use the BPE tokenizer of LLAMA 3.2. We construct the sequence as [audio token, text token], then the model is asked to predict the text token based on the previous audio token.", + "bbox": [ + 84, + 140, + 887, + 215 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Training Data For the ASR task, we select 2,000 hours of LibriHeavy speech data (Kang et al., 2024). For the audio captioning tasks, we use AudioCaps (Kim et al., 2019) and BBC sound effects (Mei et al., 2023). For the BBC sound effects, we cut off the first 10 seconds of audio if the utterance duration is greater than 10 seconds. Finally, we obtain about 500 hours of sound data. For the music captioning task, we use a subset of the Million Song dataset. We cut off the first 10 seconds of music data for each utterance, which results in about 500 hours of music data. For the corresponding captions, we use LPMusicCaps (Doh et al., 2023).", + "bbox": [ + 84, + 223, + 887, + 314 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Test Data For the ASR task, we evaluate on the LibriSpeech test clean set. For the audio captioning task, we evaluate on the AudioCaps dataset (Kim et al., 2019). For the music captioning task, we evaluate on the MusicCaps dataset (Agostinelli et al., 2023).", + "bbox": [ + 84, + 321, + 885, + 367 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Metrics Similarly, we use WER as the evaluation metric for the ASR task. For audio and music captioning, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics.", + "bbox": [ + 84, + 375, + 887, + 405 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Inference Setting For inference, we directly use the top-k sampling strategy and set $k = 30$ for all experiments.", + "bbox": [ + 84, + 411, + 820, + 428 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.4. 
LM-based Audio Generation", + "text_level": 1, + "bbox": [ + 84, + 443, + 323, + 458 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We also perform audio generation tasks, including text-to-speech, text-to-sound, and text-to-music generation. Similarly, we construct the sequence as [text token, audio token], then the model is asked to predict the audio token based on the previous text token.", + "bbox": [ + 84, + 467, + 887, + 512 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Training and Test Data We use the same training and test data as the audio comprehension task.", + "bbox": [ + 84, + 520, + 723, + 536 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Metrics For TTS evaluation, we use WER to evaluate robustness, and UTMOS and DNSMOS are used to assess speech quality. For text-to-sound and text-to-music, we follow previous works AudioGen (Kreuk et al., 2022), using Fréchet Audio Distance (FAD), Kullback-Leibler (KL) Divergence, and Fréchet Distance (FD) for audio fidelity and similarity.", + "bbox": [ + 84, + 542, + 887, + 588 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Inference Setting During the inference stage, we use the top-k sampling strategy and set $k = 30$ for all experiments.", + "bbox": [ + 84, + 595, + 852, + 611 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.5. Subjective Evaluations", + "text_level": 1, + "bbox": [ + 84, + 627, + 282, + 642 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For the subjective evaluations, we adopt the approach used in previous works (Kumar et al., 2023; Parker et al., 2024) and use the MUSHRA format without a hidden anchor. Listeners are asked to compare multiple versions of an example simultaneously, including both a labeled reference and a hidden reference. They are given the following instructions: \"Please assess the quality similarity between an audio sample and its reference. Listen carefully to the reference audio, then rate the quality of each test clip in comparison. A score of 0 indicates no resemblance to the reference, while a score of 100 means it is identical to the reference.\" We randomly select 10 samples from each category (speech, music, and sound) in the test set, ensuring that each sample receives 10 ratings.", + "bbox": [ + 84, + 651, + 887, + 756 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E. Audio Tokenizer Baselines", + "text_level": 1, + "bbox": [ + 84, + 775, + 333, + 791 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To make a fair comparison, we classify the audio tokenizers into two types: (1) speech-based tokenizers, which are trained on speech datasets, and (2) audio-based tokenizers, which are trained on speech, sound, and music datasets.", + "bbox": [ + 84, + 801, + 887, + 832 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E.1. 
Speech Tokenizer", + "text_level": 1, + "bbox": [ + 84, + 848, + 243, + 863 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For speech data, we compare with:", + "bbox": [ + 84, + 872, + 316, + 888 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/847af1e78f02e93f03fa836136aadb7e2bfa7fe3f787b38cbddd0915c8294463.jpg", + "table_caption": [ + "Table 12. The performance comparison on LibriTTS test clean. Bold for the best result and underline for the second-best result." + ], + "table_footnote": [], + "table_body": "
Models | FPS/TPS | CS/BR | Reconstruction | Efficiency
UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | Model size (M) (↓) | RTF (↓)
Encodec | 50/400 | 1024/6kbps | 3.30 | 3.76 | 3.95 | 0.94 | 2.72 | 14 | 0.019
Encodec | 50/150 | 1024/1.5kbps | 2.02 | 3.27 | 3.83 | 0.88 | 1.79 | 14 | 0.019
DAC | 50/150 | 1024/1.5kbps | 2.61 | 3.36 | 3.85 | 0.89 | 1.96 | 71 | 0.026
Wavtokenizer | 40/40 | 4096/0.48kbps | 3.65 | 3.61 | 3.80 | 0.87 | 1.81 | 77 | 0.017
StableCodec | 25/25 | 46656/0.4kbps | 4.20 | 3.74 | 3.51 | 0.88 | 1.85 | 950 | 0.039
MimiCodec (3 RVQ) | 12.5/37.5 | 2048/0.41kbps | 2.82 | 3.28 | 3.34 | 0.83 | 1.40 | 75.6 | 0.023
ALMTokensizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.68 | 3.64 | 3.90 | 0.90 | 1.92 | 174 | 0.031
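As a sanity check on the FPS/TPS and CS/BR columns above: tokens per second is the frame rate times the number of codebooks per frame, and the bitrate is tokens per second times bits per token (log2 of the codebook size). For the 12.5 Hz rows, for instance:

```python
import math

frame_rate, num_codebooks, codebook_size = 12.5, 3, 2048
tokens_per_second = frame_rate * num_codebooks           # 37.5 TPS
bitrate = tokens_per_second * math.log2(codebook_size)   # 412.5 bps ~ 0.41 kbps
print(tokens_per_second, bitrate)
```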
", + "bbox": [ + 91, + 107, + 879, + 224 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Encodec (Defossez et al., 2022), a SOTA audio codec model trained on large-scale speech, sound, and music datasets. The official open-sourced $24\\mathrm{kHz}$ version is used.", + "(2) DAC-Codec (Kumar et al., 2023), which offers very high reconstruction performance. It is trained on large-scale speech, sound, and music datasets. The official open-sourced $24\\mathrm{kHz}$ version is used.", + "(3) MimiCodec (Défossez et al., 2024), a SOTA low-bitrate speech codec model trained on a large-scale speech dataset. The sampling rate is $24\\mathrm{kHz}$ .", + "(4) SpeechTokenizer (Zhang et al., 2023), a semantic-rich speech codec model trained on a large-scale speech dataset. The sampling rate is $16\\mathrm{kHz}$ .", + "(5) WavTokenizer (Ji et al., 2024), an audio codec tokenizer trained on large-scale speech, sound, and music datasets. The sampling rate is $24\\mathrm{kHz}$ ." + ], + "bbox": [ + 83, + 266, + 887, + 448 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To make a fair comparison, for Encodec, DAC-Codec, and SpeechTokenizer, we use the first three RVQ layers to control the bitrate during inference.", + "bbox": [ + 83, + 455, + 883, + 484 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E.2. Audio Tokenizer", + "text_level": 1, + "bbox": [ + 84, + 502, + 236, + 516 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For sound and music data, we compare with Encodec, DAC-Codec, and WavTokenizer. These three models are trained on large-scale speech, sound, and music datasets.", + "bbox": [ + 83, + 525, + 885, + 556 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E.3. Semantic Models", + "text_level": 1, + "bbox": [ + 84, + 571, + 241, + 585 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Furthermore, to evaluate the performance of semantic information, we also introduce several SSL-based models. For speech, we use WavLM (Chen et al., 2022a) and HuBERT (Hsu et al., 2021). For sound and music, we use BEATs (Chen et al., 2022b) and Wav2Vec2-AudioSet $^{6}$ .", + "bbox": [ + 83, + 595, + 887, + 642 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F. More audio tokenizer evaluation experiments", + "text_level": 1, + "bbox": [ + 84, + 661, + 488, + 678 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F.1. The subjective evaluation for audio tokenizer", + "text_level": 1, + "bbox": [ + 84, + 686, + 433, + 702 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 7 shows the subjective evaluation results for audio tokenizer.", + "bbox": [ + 84, + 710, + 524, + 724 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F.2. Evaluation results on LibriTTS test clean", + "text_level": 1, + "bbox": [ + 84, + 742, + 406, + 756 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We report the reconstruction performance evaluated on a subset of the LibriTTS test clean set, where we randomly select 400 speech utterances. Additionally, we calculate the Real-Time Factor (RTF) and model size to assess efficiency. For RTF evaluation, we use an NVIDIA A100 GPU to evaluate all models.", + "bbox": [ + 83, + 766, + 885, + 810 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "F.3. 
Length generalization", + "text_level": 1, + "bbox": [ + 84, + 828, + 272, + 842 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "StableCodec (Parker et al., 2024) highlights that the introduction of transformer-based architectures can lead to the length generalization problem. For instance, the training data of ALMTokenizer consists of 5-second segments, whereas the test", + "bbox": [ + 83, + 851, + 885, + 881 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "6https://huggingface.co/ALM/wav2vec2-large-audioset", + "bbox": [ + 104, + 890, + 434, + 905 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/f019bb22a5fffe4804a7ec41e4db6b17b6e1f070f9aabb5852f94021dc8b83bd.jpg", + "table_caption": [ + "Table 13. Objective metrics for the ALMTokenizer and baselines, evaluated on utterances from length 4s to 10s, showing generalization of models across lengths" + ], + "table_footnote": [], + "table_body": "
Model | FPS | TPS | Bitrate | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | DNSMOS (↑)
4 seconds
Encodec | 50 | 150 | 1.5kbps | 1.97 | 2.64 | 3.62 | 0.80 | 3.26
DAC | 50 | 150 | 1.5kbps | 2.1 | 3.17 | 3.65 | 0.81 | 3.26
Ours | 12.5 | 37.5 | 0.41kbps | 1.84 | 3.63 | 3.69 | 0.79 | 3.41
6 seconds
Encodec | 50 | 150 | 1.5kbps | 1.97 | 2.54 | 3.63 | 0.81 | 3.26
DAC | 50 | 150 | 1.5kbps | 2.0 | 3.11 | 3.65 | 0.81 | 3.28
Ours | 12.5 | 37.5 | 0.41kbps | 1.89 | 3.66 | 3.75 | 0.81 | 3.62
8 seconds
Encodec | 50 | 150 | 1.5kbps | 1.96 | 2.52 | 3.63 | 0.81 | 3.34
DAC | 50 | 150 | 1.5kbps | 2.1 | 3.18 | 3.66 | 0.81 | 3.28
Ours | 12.5 | 37.5 | 0.41kbps | 1.95 | 3.55 | 3.74 | 0.81 | 3.66
10 seconds
Encodec | 50 | 150 | 1.5kbps | 1.95 | 2.53 | 3.65 | 0.81 | 3.32
DAC | 50 | 150 | 1.5kbps | 2.1 | 2.19 | 3.67 | 0.81 | 3.25
Ours | 12.5 | 37.5 | 0.41kbps | 1.96 | 3.54 | 3.73 | 0.81 | 3.66
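The objective scores reported per utterance length (e.g., PESQ and STOI) can be reproduced with standard open-source implementations; a sketch using the `pesq` and `pystoi` packages is shown below. Note that wideband PESQ is defined at 16 kHz, so 24 kHz codec outputs would first need resampling, and the exact evaluation toolchain used for the table is not specified here.

```python
import numpy as np
from pesq import pesq      # pip install pesq
from pystoi import stoi    # pip install pystoi

def objective_scores(reference: np.ndarray, degraded: np.ndarray, fs: int = 16000):
    """Wideband PESQ and STOI for one utterance (mono float arrays at fs Hz)."""
    return {
        "pesq": pesq(fs, reference, degraded, "wb"),
        "stoi": stoi(reference, degraded, fs, extended=False),
    }
```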
", + "bbox": [ + 150, + 119, + 823, + 367 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "data comprises segments of varying durations. We evaluate the model across four distinct length levels: 4, 6, 8, and 10 seconds. Encodec and DAC are selected as baselines due to their reliance on convolutional layers, which demonstrate robustness to variable input lengths. As shown in Table 13, the evaluation results indicate that ALMTokensizer effectively handles inference across these diverse lengths. These findings suggest that ALMTokensizer exhibits strong generalization capabilities with respect to input length variation.", + "bbox": [ + 83, + 396, + 887, + 474 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F.4. Compared to diffusion-based audio codec models", + "text_level": 1, + "bbox": [ + 84, + 488, + 460, + 503 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We compare ALMTokens with an alternative family of audio tokenizers that leverage discrete semantic tokens derived from self-supervised pre-trained (SSL) models (e.g., Hubert (Hsu et al., 2021), WavLM (Chen et al., 2022a), AudioMAE (Huang et al., 2022)). These models first quantize the SSL features into semantic tokens and subsequently use a generative model to resynthesize the waveform. Diffusion (Ho et al., 2020) and Flow-Matching (Lipman et al., 2022) are two popular generative models. Previous works, such as GLM4-Voice tokenizer (Zeng et al., 2024) and SemantiCodec (Liu et al., 2024), have demonstrated success using diffusion-based decoders. However, such strategies tend to result in significant information loss. For instance, the semantic tokens in GLM4-Voice lack timbre information and require additional prompts to control timbre during decoding. Notably, the open-sourced GLM4-Voice tokenizer uses a fixed timbre, meaning that any speech encoded by GLM4-Voice will lose its original timbre. To address this information loss in semantic tokens, SemantiCodec introduces acoustic streaming to enhance waveform reconstruction. A key concern, however, is that both SemantiCodec and GLM4-Voice tokenizers demand significantly more computational resources during the inference stage. In the following, we present a comprehensive comparison between ALMTokens and SemantiCodec, focusing on the following aspects: (1) reconstruction performance for speech, sound, and music; (2) semantic information performance for speech, sound, and music; and (3) computational resource requirements during inference, measured using RTF.", + "bbox": [ + 83, + 512, + 888, + 724 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 14 shows the speech reconstruction and semantic performance, where we observe that ALMTokenizer outperforms the alternatives in both aspects while using less bitrate. Table 15 presents experimental results for sound and music data, where ALMTokenizer again demonstrates superior performance across all metrics compared to SemantiCodec. In Table 16, we present the model size and RTF metrics, showing that ALMTokenizer has fewer model parameters and significantly surpasses SemantiCodec in inference speed (0.031 vs 0.92).", + "bbox": [ + 83, + 729, + 887, + 808 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "G. The details of ALMTokenizer structure and training", + "text_level": 1, + "bbox": [ + 84, + 825, + 553, + 844 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "G.1. 
Model structure", + "text_level": 1, + "bbox": [ + 84, + 851, + 236, + 866 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 17 gives the details of ALMTokensizer configuration, which results in 174M parameters. In all of experiments, for the MAE-transformer encoded and decoder, we adopt a 8 layer transformer layers.", + "bbox": [ + 84, + 875, + 885, + 906 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 761, + 70 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/f59e452f380f40477af82cd8fc63a94ba7b3a987a6cdc597a6a915a3c081ab47.jpg", + "table_caption": [ + "Table 14. The performance comparison between ALMTokensizer and SemanticCodec on VCTK dataset." + ], + "table_footnote": [], + "table_body": "
Models | FPS/TPS | CS/BR | Reconstruction | Semantic
UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | ASR (↓) | EC (↑)
SemantiCodec | 50/50 | 16384/0.68kbps | 3.2 | 3.57 | 3.90 | 0.81 | 1.76 | 48.3 | 17.8
ALMTokensizer | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0
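The semantic columns (ASR, EC) come from training lightweight heads on top of frozen, quantized tokenizer features, as described in Section D.2. A minimal classification probe of that kind, assuming the tokenizer exposes quantized frame features of shape (B, T, D) (the actual adaptor layers may be larger), could look like:

```python
import torch
import torch.nn as nn

class LinearProbe(nn.Module):
    """Mean-pool frozen quantized features, then classify (e.g. emotion classes)."""

    def __init__(self, feature_dim: int, num_classes: int):
        super().__init__()
        self.head = nn.Linear(feature_dim, num_classes)

    def forward(self, quantized_features):       # (B, T, D) from the frozen tokenizer
        pooled = quantized_features.mean(dim=1)  # (B, D)
        return self.head(pooled)

probe = LinearProbe(feature_dim=256, num_classes=7)
logits = probe(torch.randn(4, 75, 256))
loss = nn.functional.cross_entropy(logits, torch.randint(0, 7, (4,)))
```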
", + "bbox": [ + 93, + 107, + 879, + 172 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/9f4ec6d1eb51ddf04ea710cf886db9d3b82fb21f6a7d4b9c84f522637ec04adb.jpg", + "table_caption": [ + "Table 15. The performance comparison between ALMTokensizer and SemanticCodec on Music (MusicCaps) and sound data (AudioCaps)." + ], + "table_footnote": [], + "table_body": "
Models | FPS/TPS | CS/BR | Reconstruction | Semantic
Mel loss (↓) | STFT loss (↓) | VISQOL (↑) | Classification (↑)
Sound data
SemantiCodec | 50/50 | 16384/0.68kbps | 18.45 | 1.40 | 2.47 | 38.8%
ALMTokensizer | 12.5/37.5 | 2048/0.41kbps | 15.0 | 1.24 | 2.99 | 44%
Music data
SemantiCodec | 50/50 | 16384/0.68kbps | 47.9 | 1.58 | 2.49 | 48%
ALMTokensizer | 12.5/37.5 | 2048/0.41kbps | 34.4 | 1.32 | 3.96 | 59%
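Mel loss and STFT loss in this table are spectrogram-magnitude distances between the reference and the reconstruction. One common formulation of an L1 log-mel distance with torchaudio is sketched below; the transform settings (n_fft, hop length, number of mel bins) are illustrative assumptions rather than the exact configuration used for the table.

```python
import torch
import torchaudio

mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=24000, n_fft=1024, hop_length=256, n_mels=80
)

def mel_l1(reference: torch.Tensor, reconstruction: torch.Tensor) -> torch.Tensor:
    """L1 distance between log-mel spectrograms of two waveforms of shape (1, N)."""
    eps = 1e-5
    return (torch.log(mel(reference) + eps) - torch.log(mel(reconstruction) + eps)).abs().mean()
```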
", + "bbox": [ + 130, + 210, + 841, + 337 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Patchify and UnPatchify modules A single-channel audio signal $\\pmb{x} \\in \\mathcal{R}^{1 \\times N}$ (where $N$ denotes the sampling points) is processed through the Encodec-style Patchify and UnPatchify modules, which adopt the same structure as Encodec (Défossez et al., 2022), consisting of four convolutional blocks. Each convolutional block consists of a residual unit followed by a down-sampling layer. These convolution blocks effectively encode the audio signal $\\pmb{x}$ into an audio frame representation $e \\in \\mathcal{R}^{T \\times d}$ , where $T$ denotes the number of frames and $d$ denotes the dimension of each vector. The convolution blocks are followed by a two-layer LSTM for sequence modeling, followed by a final 1D convolutional layer with a kernel size of 7 and $D$ output channels. The UnPatchify module mirrors the Patchify architecture by substituting stride convolutions with transposed convolutions and reversing the stride order.", + "bbox": [ + 83, + 359, + 887, + 479 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For the StableCodec-style Patchify and UnPatchify modules, we follow the approach in StableCodec (Parker et al., 2024) and use a reshape operation to transform $\\boldsymbol{x} \\in \\mathcal{R}^{t \\times sr}$ into $e \\in \\mathcal{R}^{T \\times d}$ , where $T = N / 320$ and $d = 320$ . We then apply a linear layer to map the dimension to $D$ . Finally, we add four transformer layers for sequence modeling. Similarly, the UnPatchify module mirrors the Patchify architecture.", + "bbox": [ + 83, + 488, + 887, + 549 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Discriminators For the discriminators, we follow prior work (Défossez et al., 2022), which combines mel-spectrogram and log-mel-spectrogram features and inputs them into a network consisting of several convolutional layers. Specifically, we use six discriminators with different configurations: the hidden dimensions are set as 64, 128, 256, 512, 512, 512, and the hop lengths are set as 32, 64, 128, 256, 512, 1024.", + "bbox": [ + 83, + 556, + 887, + 617 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "G.2. Reconstruction loss and adversarial loss for ALMTokenizer", + "text_level": 1, + "bbox": [ + 84, + 633, + 537, + 647 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Let the reconstructed signal be $\\hat{\\pmb{x}}$ . For the reconstruction loss, we design it from two perspectives: the time domain and the frequency domain. We first compute the $L_{1}$ loss between $\\pmb{x}$ and $\\hat{\\pmb{x}}$ in the time domain. Next, we compute the $L_{1}$ loss between the STFT spectrogram of $\\pmb{x}$ and $\\hat{\\pmb{x}}$ in the frequency domain. Following (Wang et al., 2024b), we employ a sub-band split strategy to divide the spectrogram into several parts. The adversarial loss is employed to enhance the perceptual quality of the generated audio:", + "bbox": [ + 83, + 657, + 887, + 734 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {d} = \\frac {1}{K} \\sum_ {i = 1} ^ {K} \\max (0, 1 - D _ {k} (\\boldsymbol {x})) + \\max (0, 1 + D _ {k} (\\hat {\\boldsymbol {x}})) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 739, + 885, + 781 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $K$ denotes the number of discriminators. 
During the training stage, the adversarial loss for the generator is computed as a hinge loss over the logits of these discriminators:", + "bbox": [ + 83, + 789, + 887, + 819 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {a d v} = \\frac {1}{K} \\sum_ {i = 1} ^ {K} \\max (0, 1 - D _ {k} (\\hat {\\boldsymbol {x}})) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 827, + 885, + 868 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The feature loss $\\mathcal{L}_{feat}$ is computed by taking the average absolute difference between the discriminator's internal layer outputs for the generated audio and those for the corresponding real audio.", + "bbox": [ + 83, + 875, + 887, + 906 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 763, + 70 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 475, + 922, + 496, + 934 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/7eb3af90218801d9df7252db544ed8ebf2c58ddeb96adbbaf15ad379992732a6.jpg", + "table_caption": [ + "Table 16. The model size and RTF comparison between SemantiCodec and ALMTokensizer." + ], + "table_footnote": [], + "table_body": "
Model | Model size (M) (↓) | RTF (↓)
SemantiCodec | 507 | 0.92
ALMTokenizer (Ours) | 174 | 0.031
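Model size in this comparison is simply the number of trainable parameters, reported in millions. With PyTorch this is the usual one-liner (`model` below is any nn.Module, a placeholder rather than a released checkpoint):

```python
def param_count_millions(model) -> float:
    """Total trainable parameters in millions (the 'Model size (M)' column)."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6
```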
", + "bbox": [ + 308, + 107, + 661, + 160 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/6d44cde2f64ff7b03fe3f9a37cca4f2bb251ac87eae5b10bc5cf0924c1174a82.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ALMTokenizer
Input shape | (B, 1, N)
Patchify module (output) | (B, T, d), T = N/320
Token Interleaving and Retrieval | w ∈ [2, 3, 4, 5, 6, 7, 8, 9, 10]
Dimension of transformer encoder | 256
Number of transformer encoder layers | 24
Dimension of transformer decoder | 512
Number of transformer decoder layers | 24
Codebook size | 2048
VQ layers | 3
Number of Transformer heads | 64
UnPatchify module (output) | (B, 1, N)
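The "Token Interleaving and Retrieval" row corresponds to the mechanism of Section 3.2: one learnable query ([CLS]) token is inserted after every w frames, and those positions are gathered back after the transformer encoder (the decoder side mirrors this with mask tokens). A minimal, self-contained sketch of that indexing, in our own simplified form rather than the released code, is:

```python
import torch

def interleave_queries(frames: torch.Tensor, cls_token: torch.Tensor, w: int):
    """Insert one query ([CLS]) token after every w frames.

    frames:    (B, T, D) with T divisible by w
    cls_token: (1, 1, D) learnable query embedding
    Returns the interleaved sequence (B, T + T // w, D) and the positions of
    the query tokens, so they can be retrieved after the encoder.
    """
    B, T, D = frames.shape
    groups = frames.reshape(B, T // w, w, D)
    queries = cls_token.expand(B, T // w, 1, D)
    mixed = torch.cat([groups, queries], dim=2).reshape(B, -1, D)
    query_positions = torch.arange(T // w) * (w + 1) + w   # index of each [CLS]
    return mixed, query_positions

def retrieve(sequence: torch.Tensor, positions: torch.Tensor) -> torch.Tensor:
    """Pick out the query tokens (or mask tokens) at the stored positions."""
    return sequence[:, positions, :]

frames = torch.randn(2, 12, 256)                     # 12 frames, window size w = 6
cls = torch.zeros(1, 1, 256, requires_grad=True)     # stands in for the learnable query
mixed, pos = interleave_queries(frames, cls, w=6)    # -> (2, 14, 256), queries at 6 and 13
h = retrieve(mixed, pos)                             # (2, 2, 256) query tokens
```

With 75 Hz Patchify frames (T = N/320 at 24 kHz), a window size of w = 6 yields 12.5 query tokens per second, consistent with the 12.5 Hz default configuration reported elsewhere in the paper.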
", + "bbox": [ + 256, + 171, + 715, + 369 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 17. ALMTokenizer model backbone configurations", + "bbox": [ + 313, + 386, + 656, + 401 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G.3. Training details", + "text_level": 1, + "bbox": [ + 84, + 422, + 235, + 438 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The AdamW optimizer is used in the training. We set the learn rate as $1e - 4$ . We train the model with 200k steps. The final loss as following shows. We set $\\lambda_{1} = 0.5$ and $\\lambda_{2} = 0.1$ during our experiments. We conduct all of the experiments with 4 NVIDIA A100-80G GPUs.", + "bbox": [ + 83, + 446, + 888, + 492 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathbf {L} _ {\\text {a d v}} + \\mathbf {L} _ {\\text {f e a t}} + \\mathbf {L} _ {\\text {r e c}} + \\lambda_ {1} \\mathbf {L} _ {\\text {M A E}} + \\lambda_ {2} \\mathbf {L} _ {\\text {A R}} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 502, + 885, + 520 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "H. Reproducibility Statement", + "text_level": 1, + "bbox": [ + 84, + 537, + 336, + 555 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To enhance reproducibility, we provide the pseudocode of ALMTokensizer. In the future, we plan to improve both the model structure and training data to obtain more robust models, especially for music and sound, and release the code for the research community.", + "bbox": [ + 83, + 563, + 888, + 609 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Listing 1. Pseudocode of ALMTokenizer" + ], + "code_body": "class ALMTokensizer: def __init__(self, transformerEncoder_args, transformerDecoder_args, maeDecoder_args, depth_gpt_args, patchify_args, encoder_embedding_dim, decoder_embedding_dim, semantic_prior_path, mask_rate, window_sizes = [2,3,4,5,6,7,8,9,10],): self(window_sizes = window_sizes self.transformerEncoder = Transformer(transformerEncoder_args) self.transformerDecoder = Transformer(transformerDecoder_args) self.maedecoder = Transformer(maedecoder_args) self.Patchify = EncodeEncoder(patchify_args) self.UnPatchify = EncodeDecoder(patchify_args)", + "guess_lang": "python", + "bbox": [ + 86, + 642, + 885, + 907 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 764, + 71 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 475, + 922, + 495, + 935 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "self.cls_token = nnParameter(torch.zeros(1, 1, encoder_embedding_dim))\nselfmasked_token = nnParameter(torch.zeros(1, 1, decoder_embedding_dim))\ncheckpoint = torch.load(semantic_prior_path, map_location=\"cpu\")\nself.vq = RVQ_semantic(\n input_dim=encoder_embedding_dim,\n semantic_prior = checkpoint,\n layers = 3)\nself.depth_gpt = GPT Decoder(depth_gpt_args)\nself.temp_window_size = 6\nself_mask_rate = mask_rate\ndef Encoder_token_Interleaving(self, x):\n B, T, D = x.shape # batch, length, dim\ncls_tokens = self.cls_tokenrepeat(B, (T//self.tmp_window_size), 1).unsqueeze(2)\n new_T = T + (T // self.tmp_window_size)\nx reshaped = x reshape(B, T // self.tmp_window_size, self.tmp_window_size, D)\nx_withCls = torch.cat([x reshaped, cls_tokens], dim=2)\nnew_x = 
x_withCls.reshape(B, -1, D)\nreturn new_x\ndef Encoder_token_Retrieval(self, x):\n B, new_T, D = x.shape\noriginal_T = new_T - new_T // (self.tmp_window_size + 1)\nmask Indices = [(i + 1) * (self.tmp_window_size + 1) - 1 for i in range(original_T // self.tmp_window_size)]\ncls_tokens = new_x[;, mask Indices, :]\nreturnCLS_tokens\ndef Decoder_token_Interleaving(self, en_token):\n B, T, D = en_token.shape\nx = self-mask_tokenrepeat(B, 1, 1)\nnew_T = en_token.shape[1] * self.tmp_window_size + en_token.shape[1]\nx = x.repeata(1, en_token.shape[1] * self.tmp_window_size, 1)\nx = x.reshape(B, -1, self.tmp_window_size, D)\nx_with Masks = torch.cat([x, en_token.unsqueeze(2)], dim=2)\nnew_x = x_with Masksreshape(B, -1, D)\nreturn new_x\ndef Decoder_token_Retrieval(self, new_x):\n B, new_T, D = new_x.shape\nnum_masks = new_T // (self.interval + 1)\noriginal_T = new_T - num_masks\nmaskIndices = [(i + 1) * (self.interval + 1) - 1 for i in range(num_masks)]\nallIndices = list(range(new_T))\nmaskIndices = [i for i in allIndices if i not in maskIndices]\nmask Frames = new_x[;, maskIndices,:]\nreturn mask Frames\ndef forward(self, x):\n x_len = x.shape[-1]\nself.temp_window_size = choice(selfwindow_sizes)\nemb Frames = self.Patchify(x)\nif self.trainin:\n emb Frames_mask = self.apply_mask(emb Frames, mask_rate = self-mask_rate)\ninterleaving Frames = self.Encoder_token_Interleaving(emb Frames_mask)\npredictDSP = self.maedecoder(interleavingFrames)\nmae_loss = L1_loss(predictDSP, emb Frames)\nlatent_tokens = self.transformer Encoder(interleavingFrames)\nquery_token = self.Encoder_token_Retrieval(latent_tokens)\nQuantized_token, codes, allquantized = self.vq(query_token)\ncat_quantized = []\nfor q_emb in all_quantized:", + "guess_lang": "python", + "bbox": [ + 124, + 85, + 883, + 905 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 764, + 70 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 21 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "q_emb = q_emb.reshape(-1, q_emb.shape[-1]).unsqueeze(1) \ncat_quantized.append(q_emb) \ncat_quantized = torch.cat(cat_quantized, dim=1) \ngpt_loss = self.depth_gpt.compute_prior_loss(cat_quantized) \nde_interleaving Frames = self.Decoder_token_Interleaving(Quantized_token) \ndelatent_token = self.transformer Decoder(de_interleaving Frames) \nmask_tokens = self.Decoder_token_Retestval(de_forensic_token) \nx_ = self.UnPatchify mask_tokens) \nreturn x_, mae_loss, gpt_loss", + "guess_lang": "python", + "bbox": [ + 155, + 85, + 790, + 200 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "I. Limitation", + "text_level": 1, + "bbox": [ + 84, + 227, + 197, + 242 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In this study, we present ALMTokenizer, a low-bitrate, semantic-rich audio codec tokenizer. We demonstrate that ALM-Tokenizer excels in both reconstruction and semantic information retention under low-bitrate conditions. However, we acknowledge that there is still significant room for improvement in reconstruction performance, particularly for sound and music data. Building an audio tokenizer for sound and music in the low-bitrate setting poses additional challenges. In terms of semantic information, ALMTokenizer still lags behind traditional SSL models. 
Although we propose several training losses to enhance semantic information in the codec model, the improvements are limited and, in some cases, negatively impact reconstruction quality. We recognize the need for a careful design and balance of these semantic loss terms. Additionally, the multi-stage training strategy increases training complexity. These training strategy brings waste. Most of the components are eventually discarded, e.g. MAE-transformer encoder/decoder, MAE-decoder, and depth AR-transformer. These components would have made sense to still utilize them for some purpose, e.g. the AR decoder could have been used to initialize the depth transformer in the Language modeling task. These concerns are left for future work.", + "bbox": [ + 84, + 253, + 888, + 420 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling", + "bbox": [ + 207, + 56, + 761, + 70 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_model.json b/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f75862f33c3a885c83130144d02220e34e1104d1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_model.json @@ -0,0 +1,4063 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.266, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.10344v1 [cs.SD] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.096, + 0.11, + 0.877, + 0.157 + ], + "angle": 0, + "content": "ALMTokenizer: A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.199, + 0.884, + 0.233 + ], + "angle": 0, + "content": "Dongchao Yang\\(^{1}\\) Songxiang Liu\\(^{2}\\) Haohan Guo\\(^{1}\\) Jiankun Zhao\\(^{1}\\) Yuanyuan Wang\\(^{1}\\) Helin Wang\\(^{2}\\) Zeqian Ju\\(^{2}\\) Xubo Liu\\(^{2}\\) Xueyuan Chen\\(^{1}\\) Xu Tan\\(^{2}\\) Xixin Wu\\(^{1}\\) Helen Meng\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.242, + 0.259, + 0.321, + 0.274 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.28, + 0.445, + 0.733 + ], + "angle": 0, + "content": "Recent advancements in audio language models have underscored the pivotal role of audio tokenization, which converts audio signals into discrete tokens, thereby facilitating the application of language model architectures to the audio domain. In this study, we introduce ALMTokenizer, a novel low-bitrate and semantically rich audio codec tokenizer for audio language models. Prior methods, such as Encodec, typically encode individual audio frames into discrete tokens without considering the use of context information across frames. Unlike these methods, we introduce a novel query-based compression strategy to capture holistic information with a set of learnable query tokens by explicitly modeling the context information across frames. This design not only enables the codec model to capture more semantic information but also encodes the audio signal with fewer token sequences. 
Additionally, to enhance the semantic information in audio codec models, we introduce the following: (1) A masked autoencoder (MAE) loss, (2) Vector quantization based on semantic priors, and (3) An autoregressive (AR) prediction loss. As a result, ALMTokenizer achieves competitive reconstruction performance relative to state-of-the-art approaches while operating at a lower bitrate. Within the same audio language model framework, ALMTokenizer outperforms previous tokenizers in audio understanding and generation tasks.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.762, + 0.218, + 0.779 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.787, + 0.474, + 0.819 + ], + "angle": 0, + "content": "The field of generative modeling has witnessed remarkable progress, largely driven by the success of autoregressive" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.827, + 0.475, + 0.867 + ], + "angle": 0, + "content": "*Equal contribution 1The Chinese University of Hong Kong, Hong Kong, China 2Independent Authors. Correspondence to: Dongchao Yang ." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.879, + 0.148, + 0.892 + ], + "angle": 0, + "content": "Pre-print." + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.892, + 0.361, + 0.906 + ], + "angle": 0, + "content": "1http://dongchaoyang.top/ALMTokensizer/" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.26, + 0.888, + 0.442 + ], + "angle": 0, + "content": "(AR) models in the development of large language models (LLMs) (OpenAI, 2023). Inspired by the success of LLMs in the fields of natural language processing (NLP), recent works have begun to employ AR transformers for audio generation (Borsos et al., 2023a; Agostinelli et al., 2023; Yang et al., 2023c), such as using the AR transformer paradigm to solve text-to-speech task (Wang et al., 2023), or expanding the text LLM into multimodal LLM by integrating the audio modality into the original LLM (Défossez et al., 2024). Audio tokenizer plays an important role in all of these models, which converts audio signals into discrete token sequence for AR audio language modeling." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.448, + 0.889, + 0.903 + ], + "angle": 0, + "content": "In the literature, audio codec models, such as SoundStream (Zeghidour et al., 2021) and Encodec (Défossez et al., 2022), have been widely adopted as audio tokenizers for audio language models. These generative models aim to represent audio data in a quantized discrete latent space, where the codec's decoder is then used to reconstruct the audio signals from the generated discrete token sequences. Recently, there has been significant interest in the audio community regarding audio codec tokenizers, leading to the proposal of several novel models (Kumar et al., 2023; Ji et al., 2024; Défossez et al., 2024; Parker et al., 2024; Zhang et al., 2023). Despite the advancements in audio codec models, an important research question remains unanswered: which type of audio codec is most suitable for audio language modeling? Inspired by previous works (Borsos et al., 2023a; Parker et al., 2024; Ji et al., 2024; Défossez et al., 2024), these studies investigate two key properties of audio codec models: low bitrate and semantic richness. We first conduct a set of evaluation experiments to explore the influence of bitrate and semantic information on audio language modeling. 
Specifically, we train three audio codec models with varying bitrates, while keeping the number of vector quantization (VQ) layers constant and adjusting the frame rates to \\(50\\mathrm{Hz}\\), \\(25\\mathrm{Hz}\\), and \\(12.5\\mathrm{Hz}\\). We then train the audio language model using different audio tokenizers on the same dataset. To assess the impact of semantic information, we also train a \\(12.5\\mathrm{Hz}\\) semantic tokenizer and incorporate it into the audio language model. Further details can be found in Appendix B. Figure 1 presents the results, which show that: (1) low-bitrate audio codec models significantly en" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.252 + ], + "angle": 0, + "content": "hance training and inference efficiency; and (2) semantic information is more easily modeled by LM-based generative methods, e.g. lower PPL and loss. The experimental findings demonstrate the importance of constructing a low-bitrate and semantic-rich audio codec tokenizer for audio language modeling. Based on these results, we propose a novel audio codec tokenizer that offers the following advantages: (1) Low-bitrate: it compresses the audio data into fewer tokens; (2) Semantic-rich: it incorporates abundant semantic information; (3) AR-driven latent space: it optimizes the latent space for autoregressive (AR) modeling." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.259, + 0.478, + 0.606 + ], + "angle": 0, + "content": "To achieve this objective, we propose the following novel techniques: (1) We introduce a novel query-based compression strategy, which uses a set of learnable query tokens to capture holistic information by explicitly modeling the context information across audio frames with transformer layers. This strategy effectively takes advantage of the strong modeling capabilities of transformers to achieve better compression and semantic modeling. It also enables dynamic control over the compression rate by adjusting the number of query tokens. (2) To enhance semantic richness in the codec model, we introduce a Masked Autoencoder (MAE) loss, which encourages the model to capture more global information. (3) Inspired by previous works (Zhu et al., 2024), we propose the integration of semantic priors into the VQ layer. Specifically, we perform k-means clustering on the pre-trained wav2vec2 (Baevski et al., 2020) and BEATs (Chen et al., 2022b) encoder outputs, using the cluster centers to initialize the VQ layer. (4) We observe that AR models struggle to fit the distribution of the residuals in the VQ layers, with token prediction accuracy being notably lower in the second and third VQ layers compared to the first. To address this issue, we introduce an AR prediction loss to optimize the latent space." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.606, + 0.476, + 0.728 + ], + "angle": 0, + "content": "To evaluate the effectiveness of the ALMTokenizer, we first compare its reconstruction and semantic performance with previous state-of-the-art models. 
Using the same audio language model framework, we then demonstrate that ALMTokenizer achieves superior performance in LM-based audio understanding and generation tasks, including text-to-speech (TTS), speech-to-text (ASR), audio captioning, text-to-sound, text-to-music, and music captioning." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.746, + 0.236, + 0.763 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.772, + 0.291, + 0.787 + ], + "angle": 0, + "content": "2.1. Audio Language Models" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.796, + 0.476, + 0.901 + ], + "angle": 0, + "content": "Recently, there has been a growing interest in bridging audio and text through multimodal learning approaches. Models such as AudioLM (Borsos et al., 2023a) leverage AR transformers and hierarchical modeling techniques to process audio data directly, learning representations that capture both linguistic and acoustic features. Inspired by AudioLM, VALL-E (Wang et al., 2023) and SPEAR-TTS (Kharitonov" + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.082, + 0.872, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.364, + 0.887, + 0.392 + ], + "angle": 0, + "content": "Figure 1. The performance comparison when different types of tokenizer is used for audio modeling. PPL refers to perplexity." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.398, + 0.889, + 0.655 + ], + "angle": 0, + "content": "et al., 2023) formulate the text-to-speech task as an audio language modeling problem: generating an audio token sequence with the help of an autoregressive transformer. MusicLM (Agostinelli et al., 2023) and MusicGen (Copet et al., 2023) frame the text-to-music task as an audio language modeling problem. UniSep (Wang et al., 2025) explores using audio LM to solve audio separation tasks with the help of audio tokenizer. Moshi (Défossez et al., 2024), SpiRitLM (Nguyen et al., 2025), and GLM4-Voice (Zeng et al., 2024) explore speech-to-speech conversation. Furthermore, audio tokenizers can also be combined with discrete diffusion models (Yang et al., 2023d;a; Borsos et al., 2023b; Ju et al., 2024). In all of these models, the audio tokenizer plays a crucial role by transforming audio data into a discrete latent sequence, reducing computational demands compared to directly processing the audio signal, and enhancing the effectiveness and efficiency of the generation process." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.671, + 0.646, + 0.685 + ], + "angle": 0, + "content": "2.2. Audio Tokenizer" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.695, + 0.889, + 0.905 + ], + "angle": 0, + "content": "In the literature, both semantic and acoustic tokenizers are widely employed in audio language models. The semantic tokenizer is trained using pre-trained self-supervised learning (SSL) models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a). Applying k-means or vector quantization in these models generates semantic tokens (Zeng et al., 2024; Du et al., 2024; Liu et al., 2024). Previous works (Borsos et al., 2023a) demonstrate that semantic tokens are more easily modeled by language models. However, due to the loss of significant acoustic information in semantic tokens, they rely on an additional decoder to generate high-fidelity waveform, such as a diffusion model (Ho et al., 2020) or flow-matching (Lipman et al., 2022). 
Inevitably, this additional module results in increased infer" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.493, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.765, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "image", + "bbox": [ + 0.136, + 0.095, + 0.319, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.095, + 0.838, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.31, + 0.888, + 0.339 + ], + "angle": 0, + "content": "Figure 2. The left part illustrates the framework of the previous audio codec, while the right part provides an overview of the proposed ALMTokensizer. \\( w \\) denotes the window size. The details of ALMTokensizer can be found in Section 3.2." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.362, + 0.375, + 0.376 + ], + "angle": 0, + "content": "ence complexity and poorer reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.377, + 0.477, + 0.906 + ], + "angle": 0, + "content": "Acoustic tokenizer refers to audio codec models, trained for acoustic-level reconstruction tasks. Audio codec (Zeghidour et al., 2021; Defossez et al., 2022; Yang et al., 2023b; Kumar et al., 2023) have demonstrated exceptional performance in reconstructing high-quality audio. In general, these codec models consist of an encoder, a quantizer, and a decoder. Both the encoder and decoder are lightweight, resulting in minimal inference costs. Compared to semantic tokens, codec models can support audio, speech, and music domains, and their rich acoustic details mitigate the need for cascading architectures in downstream generative models. Recently, an increasing number of audio codec models have been proposed, focusing on (1) Better reconstruction quality, such as DAC (Kumar et al., 2023), Vocos (Siuzdak, 2023), SQ-Codec (Yang et al., 2024c;b) and APCodec (Ai et al., 2024); (2) Low-bitrate models, such as HiFiCodec (Yang et al., 2023b), wavtokenizer (Ji et al., 2024), StableCodec (Parker et al., 2024), and TS3-Coded (Wu et al., 2024); (3) Task-driven codec, designed for text-to-speech tasks, such as FACodec (Ju et al., 2024), SpeechTokenizer (Zhang et al., 2023), Single-Coded (Li et al., 2024), audio retrieval-based Tokenizers (Banerjee & Arora, 2022; van Niekerk et al., 2024). In this study, we focus on developing a low-bitrate, semantically rich audio codec tokenizer. The most closely related work to ours is MimiCodec (Defossez et al., 2024), which provides high-quality semantic information while achieving a low bitrate (1.1 kbps). However, MimiCodec relies on knowledge distillation from WavLM (Chen et al., 2022a) to the first VQ layer, whereas the remaining VQ layers do not incorporate semantic information. Furthermore, it is specifically designed for speech tasks and has not been validated for non-speech tasks, such as sound and music generation. In contrast to MimiCodec, our ALMTokens encode more semantic information across all VQ layers, achieves a lower bitrate, and is designed for both speech and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.362, + 0.599, + 0.377 + ], + "angle": 0, + "content": "general sound." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.396, + 0.673, + 0.414 + ], + "angle": 0, + "content": "3. 
Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.422, + 0.889, + 0.498 + ], + "angle": 0, + "content": "This section introduces the technical details of the proposed ALMTokensizer. Section 3.1 presents the framework of previous audio codec models. Section 3.2 presents the details of proposed audio codec framework. In Sections 3.3 and 3.4, we present the training loss and training strategies." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.514, + 0.617, + 0.53 + ], + "angle": 0, + "content": "3.1. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.538, + 0.888, + 0.765 + ], + "angle": 0, + "content": "Previous audio codec (Défossez et al., 2022; Zeghidour et al., 2021) typically adopt an encoder-quantizer-decoder framework, as shown in the left part of Figure 2. The audio is encoded into several audio frames by the encoder. Then, residual vector quantization (RVQ) (Zeghidour et al., 2021) is used to quantize these audio frames. Lastly, the decoder is used to recover the waveform from the quantized audio frames. It can be observed that previous works treat each audio frame equally and rely on these quantized frames to recover the audio. However, such a strategy (1) ignores the fact that different audio frames encode different levels of information, which results in some audio frames being difficult to recover in low-bitrate settings (e.g., encoding the audio frames at \\(12.5\\mathrm{Hz}\\)); (2) fails to utilize the context information between different frames." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.781, + 0.765, + 0.797 + ], + "angle": 0, + "content": "3.2. Query-based Audio Compression" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.804, + 0.889, + 0.895 + ], + "angle": 0, + "content": "To construct a low-bitrate, semantically rich audio codec model, we propose a query-based compression strategy. Our approach is inspired by the success of MAE (He et al., 2022), which applies a masking operation to the original image with a high mask rate (75%). With the help of a transformer encoder and decoder, it is possible to recover the masked" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.477, + 0.328 + ], + "angle": 0, + "content": "image content by utilizing the context information between different patches. Thus, we propose using a group of query tokens \\( {}^{2} \\) to capture holistic audio context information from the audio frames with the assistance of a transformer encoder. Since these query tokens include rich context information, it is possible to reconstruct the audio based on them. Then, a transformer decoder and mask tokens are employed to reconstruct the audio from the quantized query tokens. This strategy leverages the powerful modeling capabilities of transformers to achieve better compression and semantic modeling. Similar query-based strategies has been widely explored in previous works, such as BLIP2 (Li et al., 2023), SALMONN (Tang et al., 2024) and TiTok(Yu et al., 2024). The right part of Figure 2 illustrates the overall framework of ALMTokensizer. In the following sections, we detail each component and the associated training loss." 
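As a concrete roadmap for these components, the following is a minimal sketch (our own illustration, not the released implementation) of the window-based query-token interleaving and retrieval that the encoder and decoder described next are built around. The window size w, the learnable [CLS] query embedding, and the encoder/decoder transformer blocks follow the paper's notation; the toy sizes and the omission of the transformer itself are assumptions made to keep the example self-contained.

```python
# Minimal sketch of query-token interleaving/retrieval (encoder side).
# One [CLS] query embedding is inserted after every window of w audio frames;
# after the transformer, only the query positions are kept as the compressed sequence.
import torch

def interleave(frames, special, w):
    """frames: (T, d); special: (d,) query or mask embedding; returns (T + ceil(T/w), d)."""
    T, d = frames.shape
    chunks = []
    for start in range(0, T, w):
        chunks.append(frames[start:start + w])
        chunks.append(special.unsqueeze(0))   # one query/[CLS] slot per window
    return torch.cat(chunks, dim=0)

def retrieve(sequence, w, T):
    """Return the positions holding the inserted query/mask embeddings after interleave()."""
    keep, pos = [], 0
    for start in range(0, T, w):
        pos += min(w, T - start)   # skip the frame slots of this window
        keep.append(pos)           # the inserted special token
        pos += 1
    return sequence[keep]

T, d, w = 75, 16, 6                       # toy sizes; learnable embeddings in the real model
frames = torch.randn(T, d)
cls = torch.zeros(d)
seq = interleave(frames, cls, w)          # e_a = Interleaving(e, cls, w)
queries = retrieve(seq, w, T)             # h = Retrieval(En(e_a), w); En(.) omitted here
print(seq.shape, queries.shape)           # torch.Size([88, 16]) torch.Size([13, 16])
```

On the decoder side the same interleaving combines the quantized query tokens with learnable mask embeddings (w mask tokens before each query token), and the complementary frame positions are retrieved and passed to UnPatchify.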
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.334, + 0.478, + 0.593 + ], + "angle": 0, + "content": "Patchify and UnPatchify We explore two types of Patchify modules: (1) Following Encodec (Défossez et al., 2022), a convolution-based module, which encodes the audio data \\( \\mathbf{x} \\) into \\( e \\in \\mathcal{R}^{T \\times d} \\), where \\( T \\) and \\( d \\) denote the number of frames and the vector dimension, and (2) Following StableCodec (Parker et al., 2024), which directly uses a linear layer to encode the audio data into \\( e \\in \\mathcal{R}^{T \\times d} \\) and adds several transformer layers. Similarly, the UnPatchify mirrors the architecture of Patchify. If we use the Encodec-style Patchify module, the UnPatchify module substitutes stride convolutions with transposed convolutions and reverses the stride order. If we use the StableCodec-style Patchify module, the UnPatchify module includes a transformer block and a reshape operation. In our preliminary experiments, we find that the Encodec-style Patchify and UnPatchify modules bring better reconstruction performance. We adopt the Encodec-style Patchify module as our default setting." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.599, + 0.476, + 0.764 + ], + "angle": 0, + "content": "Token Interleaving The token interleaving module aims to combine two token sequences into a single sequence. In the encoder part, we combine the audio frames \\( e \\in \\mathcal{R}^{T \\times d} \\) and the query token [CLS]. Assuming a window size of \\( w \\), the query token will be inserted into the audio frame sequence at every \\( w \\)-intervals. In the decoder part, the token interleaving module is used to combine the quantized query tokens and learnable mask tokens. We insert \\( w \\) mask tokens before each query token. During the training stage, we dynamically choose the window size for each training iteration." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.772, + 0.475, + 0.833 + ], + "angle": 0, + "content": "Token Retrieval The token retrieval module aims to retrieve the relevant tokens from a sequence. In the encoder part, we use it to retrieve the learnable query tokens. In the decoder part, we use it to retrieve the learnable mask tokens." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.84, + 0.475, + 0.856 + ], + "angle": 0, + "content": "Query-based Transformer Encoder As the previous part" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.887, + 0.191 + ], + "angle": 0, + "content": "discussed, we introduce a learnable query token \\([\\mathrm{cls}] \\in \\mathcal{R}^{1 \\times d}\\) to capture holistic information from the audio frames \\(e\\). As Figure 2 shows, we first combine the audio frames and query token using a token interleaving module with a window size \\(w\\). Then, a transformer module is applied to model the whole sequence \\(e_a\\). After that, we employ a token retrieval module to extract the query tokens \\(h \\in \\mathcal{R}^{[T / w] \\times d}\\)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.547, + 0.198, + 0.887, + 0.234 + ], + "angle": 0, + "content": "\[\n\begin{array}{l} \boldsymbol{e} = P(\boldsymbol{x}), \quad \boldsymbol{e}_{a} = \mathrm{Interleaving}(\boldsymbol{e}, \boldsymbol{cls}, w), \tag{1} \\ \boldsymbol{e}_{a} = \mathrm{En}(\boldsymbol{e}_{a}), \quad \boldsymbol{h} = \mathrm{Retrieval}(\boldsymbol{e}_{a}, w) \end{array}\n\]" + },
+ { + "type": "text", + "bbox": [ + 0.497, + 0.241, + 0.886, + 0.271 + ], + "angle": 0, + "content": "where \( P(\cdot) \) denotes the Patchify module. \( En(\cdot) \) denotes the transformer encoder." + },
+ { + "type": "text", + "bbox": [ + 0.497, + 0.279, + 0.887, + 0.491 + ], + "angle": 0, + "content": "Residual Vector Quantization To build a low-bitrate audio codec, we empirically set the number of RVQ layers to 3, since we found that 3 RVQ layers suffice to build an effective audio codec model: \(\hat{h} = Q(h)\). Inspired by previous works (Zhu et al., 2024; Yang et al., 2024a), we first obtain the k-means clusters of Wav2vec2 (Baevski et al., 2020) to represent the speech semantic prior, and the k-means clusters of the BEATs (Chen et al., 2022b) to represent the general sound semantic prior. Assuming the codebook size is \(C\), we set \(C / 2\) to represent speech, with the remaining portion representing general sound. We then use these semantic priors to initialize the codebook of the VQ layer and fix it. Next, we apply a linear layer to map the input features into the VQ layer." + },
+ { + "type": "text", + "bbox": [ + 0.496, + 0.498, + 0.887, + 0.604 + ], + "angle": 0, + "content": "Query-based Transformer Decoder To recover the audio information, we construct a reverse process using the encoder part. We first use the token interleaving module to combine the mask token \( m \in \mathcal{R}^{1 \times d} \) with \( \hat{\pmb{h}} \). The new sequence is then modeled by a transformer module. We expect that these mask tokens can be used to recover the audio information with the help of the Unpatchify module." + },
+ { + "type": "equation", + "bbox": [ + 0.54, + 0.611, + 0.887, + 0.647 + ], + "angle": 0, + "content": "\[\n\begin{array}{l} \boldsymbol{q}_{a} = \mathrm{Interleaving}(\hat{\boldsymbol{h}}, \boldsymbol{m}, w), \quad \boldsymbol{q}_{a} = \mathrm{De}(\boldsymbol{q}_{a}), \tag{2} \\ \boldsymbol{e}_{o} = \mathrm{Retrieval}(\boldsymbol{q}_{a}, w), \quad \hat{\boldsymbol{x}} = \mathrm{UnP}(\boldsymbol{e}_{o}) \end{array}\n\]" + },
+ { + "type": "text", + "bbox": [ + 0.497, + 0.654, + 0.887, + 0.684 + ], + "angle": 0, + "content": "where \(UnP(\cdot)\) denotes the Unpatchify module. \(De(\cdot)\) denotes the transformer decoder." + },
+ { + "type": "title", + "bbox": [ + 0.498, + 0.702, + 0.628, + 0.716 + ], + "angle": 0, + "content": "3.3. Training Loss" + },
+ { + "type": "text", + "bbox": [ + 0.496, + 0.725, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Similar to previous audio CODECs, our approach is based on a GAN objective, where we optimize both the generator (which consists of the Patchify module, transformer encoder, quantizer, transformer decoder, and UnPatchify module) and the discriminators. 
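The quantizer inside this generator is the 3-layer RVQ with frozen, k-means-initialized codebooks described above. A minimal sketch follows (not the authors' code): the random centroids stand in for the Wav2vec2 and BEATs k-means clusters, a single codebook shared across layers is used for brevity, and the toy dimensions are assumptions.

```python
# Minimal sketch of residual vector quantization with a frozen semantic-prior codebook.
# Each RVQ layer quantizes the residual left over by the previous layers.
import numpy as np

rng = np.random.default_rng(0)
C, d = 2048, 16
speech_centroids = rng.normal(size=(C // 2, d))   # stand-in for Wav2vec2 k-means clusters
sound_centroids = rng.normal(size=(C // 2, d))    # stand-in for BEATs k-means clusters
codebook = np.concatenate([speech_centroids, sound_centroids])  # frozen, never updated
proj = rng.normal(size=(d, d)) / np.sqrt(d)       # linear map into the VQ space (learnable in the paper)

def rvq(h, codebook, num_layers=3):
    """h: (N, d) query tokens. Returns indices (num_layers, N) and quantized tokens (N, d)."""
    residual = h @ proj
    quantized = np.zeros_like(residual)
    indices = []
    for _ in range(num_layers):
        dists = ((residual[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)
        idx = dists.argmin(axis=1)        # nearest codeword per token
        indices.append(idx)
        quantized += codebook[idx]        # accumulate the reconstruction
        residual -= codebook[idx]         # next layer quantizes what is still missing
    return np.stack(indices), quantized

tokens, h_hat = rvq(rng.normal(size=(13, d)), codebook)
print(tokens.shape, h_hat.shape)          # (3, 13) (13, 16)
```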
For the generator, the training loss comprises four components: (1) reconstruction loss term; (2) adversarial loss term; (3) Masked AutoEncoder (MAE) loss; and (4) AR prediction loss. The reconstruction and adversarial losses typically follow previous works (Défossez et al., 2022; Zeghidour et al., 2021). In the following, we describe the MAE loss and AR prediction loss. More details of training loss refer to Appendix G." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.864, + 0.475, + 0.892 + ], + "angle": 0, + "content": "2Query tokens are learnable embedding vectors that are updated throughout the training process." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.763, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.477, + 0.297 + ], + "angle": 0, + "content": "MAE Loss As we discussed in Section 1, a semantic-rich audio codec tokenizer is better suited for audio language modeling. Inspired by the success of MAE (He et al., 2022), we propose to incorporate an MAE loss during the training of the audio codec. Specifically, for the frame sequence \\( e \\), we randomly choose several audio frame features and set these frames to zero, \\( e_m = \\mathrm{Mask}(e) \\). We pass the masked features \\( e_m \\) into the encoder transformer. Then, the encoded features are passed into an MAE-decoder transformer block to predict \\( e \\). In our experiments, we adopt a dynamic mask rate (from 0.2 to 0.3), we found that using a large mask rate will significantly influence the reconstruction performance. Following MAE (He et al., 2022), we apply the MSE loss to the masked audio frames." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.304, + 0.478, + 0.562 + ], + "angle": 0, + "content": "AR Loss As shown in figure 3, we find that the first layer of RVQ-based audio codec models is easier to fit for the audio language model than the other layers (e.g., layers 2 and 3). One possible reason is that the first layer encodes more semantically related information. For speech data, most of the content information can be recovered by the first VQ layer, while the residual layers primarily encode acoustic-level information, which influences speech quality. To make the tokens in the residual layer easier to fit, we introduce an autoregressive (AR) prediction prior (Wang et al., 2024a) in the RVQ latent space. Specifically, we introduce a lightweight continuous autoregressive (AR) transformer3, which is used to conduct next-token prediction in the RVQ layer. For example, it is tasked with predicting the quantized feature of the third VQ layer based on the features of the first and second VQ layers. We use mean squared error (MSE) loss for optimization." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.578, + 0.318, + 0.594 + ], + "angle": 0, + "content": "3.4. Two-stage Training Strategy" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.601, + 0.477, + 0.829 + ], + "angle": 0, + "content": "Although training the ALMTokenizer using the typical Encoder (Défossez et al., 2022) setting is feasible, we introduce a two-stage training paradigm to improve both reconstruction performance and semantic information. 
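Before turning to the training schedule, here is a minimal sketch of the two auxiliary objectives just described: the MAE loss on zeroed-out frames and the continuous AR prediction loss over RVQ layers. This is one plausible reading rather than the authors' code; the stand-in encoder/decoder/AR modules, the accumulation of residual-layer features as context, and the loss averaging are assumptions.

```python
# Minimal sketch of the MAE loss (MSE on masked frames) and the continuous AR loss
# (predict the quantized features of RVQ layer k from layers < k with MSE).
import torch
import torch.nn.functional as F

def mae_loss(frames, encoder, mae_decoder, mask_rate=0.25):
    """frames: (T, d). Zero a random subset of frames, encode, decode, and score
    the prediction only on the masked positions, as in MAE."""
    T = frames.shape[0]
    masked = torch.rand(T) < mask_rate          # dynamic rate ~0.2-0.3 in the paper
    corrupted = frames.clone()
    corrupted[masked] = 0.0
    pred = mae_decoder(encoder(corrupted))      # transformer blocks in the real model
    return F.mse_loss(pred[masked], frames[masked])

def ar_loss(quantized_layers, ar_transformer):
    """quantized_layers: list of (T, d) tensors, one per RVQ layer (layer 1 first)."""
    loss = 0.0
    context = quantized_layers[0]
    for k in range(1, len(quantized_layers)):
        target = quantized_layers[k]
        loss = loss + F.mse_loss(ar_transformer(context), target)
        context = context + target             # residual layers accumulate
    return loss / max(len(quantized_layers) - 1, 1)

# toy check with simple stand-ins for the transformer modules
frames = torch.randn(75, 16)
print(mae_loss(frames, lambda x: x, lambda x: x))
layers = [torch.randn(75, 16) for _ in range(3)]
print(ar_loss(layers, torch.nn.Linear(16, 16)))
```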
Our motivation stems from the fact that audio codec quantization focuses on modeling local relationships, whereas semantic information focuses on modeling global relationships. These two goals are in conflict. To resolve this conflict, we present a two-stage training strategy. In the first stage, we do not incorporate the quantization part; instead, we train directly an AutoEncoder with Patchify and UnPatchify modules. To encode more semantic information in the Patchify module, we introduce MAE loss during this stage, by adding transformer-based MAE-encoder and decoder. The encoder processes the masked frame sequence, and the decoder pre" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.887, + 0.177 + ], + "angle": 0, + "content": "dicts the masked part. After training, the transformer encoder and decoder are discarded. In the second stage, we first initialize the ALMTokensizer's Patchify and UnPatchify modules with the checkpoint from the first stage, and freeze the parameters of the Patchify module. Then, we train the model using the training loss described in Section 3.3." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.196, + 0.63, + 0.212 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.221, + 0.733, + 0.236 + ], + "angle": 0, + "content": "4.1. Dataset and Training Details" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.245, + 0.888, + 0.457 + ], + "angle": 0, + "content": "Data preparation for the audio codec ALMTokensizer is trained on approximately 4,500 hours of data. In the speech domain, we utilize LibriTTS training set (Zen et al., 2019) and a subset of Multilingual LibriSpeech (MLS) (Pratap et al., 2020), with 2,000 hours randomly selected. In the sound domain, we utilize a subset of AudioSet, with 1,000 hours randomly selected; in the music domain, we employ a subset of the Million Song Dataset (Bertin-Mahieux et al., 2011), also with 1,000 hours randomly selected. We evaluate the codec's speech reconstruction performance using a subset of the VCTK dataset (Veaux et al., 2017), and assess both audio and music reconstruction performance using the AudioCaps (Kim et al., 2019) validation set and the MusicCaps dataset (Agostinelli et al., 2023), respectively." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.464, + 0.889, + 0.6 + ], + "angle": 0, + "content": "Data for Audio Language Models To assess the effectiveness of the proposed audio tokenizer, we construct an audio language model framework to perform six audio-related tasks. The details are provided in Appendix D.3 and D.4. For speech data, we select 2,000 hours of speech-text pairs from LibriHeavy (Kang et al., 2024). For sound data, we utilize the AudioCaps training set and BBC Sound Effects. For music data, we use a subset of the Million Song dataset and the caption data from LP-MusicCaps (Doh et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.607, + 0.889, + 0.879 + ], + "angle": 0, + "content": "Implementation Details ALMTokenizer first performs patchification on the audio data, we set the patch size to 320 in all of experiments, which encodes 1 second of \\(24\\mathrm{kHz}\\) audio into 75 frames. For the Encoder-style Patchify module, we adopt the settings from Encodec (Défossez et al., 2022) encoder. To enable streaming for the codec model, a causal convolution layer is employed. 
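As a quick sanity check of how these settings relate to the rates reported in Table 1, the arithmetic below reproduces the 75 Hz frame rate, the 12.5 Hz query-token rate, and the 0.41 kbps bitrate. The inference-time window size w = 6 is our own inference from 75 / 12.5; the paper only states that w is sampled dynamically during training.

```python
# Back-of-the-envelope check (our own arithmetic, not taken from the paper's appendix).
import math

sample_rate = 24_000                            # Hz
patch_size = 320                                # samples per frame after Patchify
frame_rate = sample_rate / patch_size           # 75 frames per second
w = 6                                           # assumed window size at inference
query_rate = frame_rate / w                     # 12.5 query tokens per second (FPS in Table 1)
rvq_layers = 3
tokens_per_second = query_rate * rvq_layers     # 37.5 (TPS in Table 1)
codebook_size = 2048
bits_per_token = math.log2(codebook_size)       # 11 bits
bitrate = tokens_per_second * bits_per_token    # 412.5 bit/s, i.e. ~0.41 kbps (BR in Table 1)
print(frame_rate, query_rate, tokens_per_second, bitrate)
```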
For the encoder-transformer and decoder-transformer components, we use 24 self-attention layers, with latent dimensions of 256 and 512, respectively. Following StableCodec (Parker et al., 2024), the self-attention mechanism uses a causal sliding attention window of 64 steps to restrict the receptive field and promote the generalization of the architecture to sequences of arbitrary length. Rotary Positional Embeddings (RoPE) are used. Refer to Appendix G for the details of ALMTokenizer model training. For the audio language model, we follow the framework of Moshi (Défossez et al., 2024). For further details, refer to Appendix A." + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.837, + 0.477, + 0.903 + ], + "angle": 0, + "content": "3The term continuous autoregressive (AR) transformer is used to distinguish our approach from traditional discrete AR models, which operate on discrete token sequences and are optimized using cross-entropy loss. In our study, to facilitate gradient backpropagation, we apply the AR transformer directly to continuous features." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.094, + 0.887, + 0.146 + ], + "angle": 0, + "content": "Table 1. The speech reconstruction and semantic performance comparison between the ALMTokensizer and previous tokenizers. FPS denotes that the frame number in one second. TPS denotes that the token number in one second. CS denotes the codebook size, BR denotes the bit-rate. ST denotes speechtokenizer. Bold for the best result and underline for the second-best result. Evaluation on VCTK dataset." + }, + { + "type": "table", + "bbox": [ + 0.089, + 0.147, + 0.911, + 0.309 + ], + "angle": 0, + "content": "
Models | FPS/TPS | CS/BR | Reconstruction: UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | Semantic: ASR (↓) | ER (↑)
Hubert (Hsu et al., 2021) | - | - | - | - | - | - | - | 6.5 | 31.0
WavLM (Chen et al., 2022a) | - | - | - | - | - | - | - | 6.2 | 29.0
Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 2.58 | 3.27 | 3.64 | 0.81 | 2.0 | 35.3 | 26.5
DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 3.13 | 3.41 | 3.67 | 0.81 | 2.1 | 44.1 | 17.6
Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 3.67 | 3.50 | 3.72 | 0.79 | 1.9 | 44.6 | 19.8
StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 4.22 | 3.64 | 3.40 | 0.76 | 1.8 | 98.3 | 15.8
ST (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 3.41 | 3.36 | 3.68 | 0.79 | 1.7 | 19.8 | 27.0
Mimi (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 3.01 | 3.14 | 3.28 | 0.75 | 1.5 | 25.1 | 28.0
Mimi (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 3.65 | 3.38 | 3.82 | 0.82 | 2.1 | 23.8 | 28.3
ALMTokensizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0
" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.333, + 0.254, + 0.347 + ], + "angle": 0, + "content": "4.2. Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.357, + 0.477, + 0.418 + ], + "angle": 0, + "content": "We evaluate the performance of previous SOTA audio tokenizers, and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.425, + 0.475, + 0.531 + ], + "angle": 0, + "content": "Audio Reconstruction For speech reconstruction, we use DNS-MOS, UT-MOS, PESQ, STOI (Short-time Objective Intelligibility), and VISQOL. For sound and music data evaluation, VISQOL (audio version), STFT loss, and Mel loss are used. Furthermore, following (Kumar et al., 2023), the MUSHRA subjective test is conducted for speech, sound, and music. Refer to Appendix D for more details." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.538, + 0.475, + 0.735 + ], + "angle": 0, + "content": "Audio Semantic Information Previous SSL models, such as Hubert (Hsu et al., 2021), have shown that semantic-rich representation can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Thus, we can validate the performance of features of the audio tokenizer for downstream recognition tasks. For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset, and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For sound data, we conduct sound classification tasks on the ESC-50 dataset (Piczak, 2015). For music data, we conduct music classification tasks on the Medley-solos-DB dataset (Lostanlen & Cella, 2016)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.742, + 0.475, + 0.892 + ], + "angle": 0, + "content": "Audio Understanding To further validate whether the audio tokenizer is suitable for building an audio language model, we propose to conduct an understanding task using discrete tokens. We conduct three tasks: ASR, audio caption, and music caption. For the audio data, we use the audio tokenizer to transform it into discrete tokens, and for text data, we use the BPE tokenizer of LLAMA 3.2. For audio and music caption, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics." + }, + { + "type": "table_caption", + "bbox": [ + 0.497, + 0.318, + 0.887, + 0.374 + ], + "angle": 0, + "content": "Table 2. The sound reconstruction performance comparison between the proposed ALMTokensizer and previous audio tokenizer models. SC denotes the sound classification task. Evaluation on AudioCaps validation set." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.379, + 0.884, + 0.474 + ], + "angle": 0, + "content": "
Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | SC (↑)
BEATs | - | - | - | 24%
Wav2vec2 | - | - | - | 53%
Encodec | 3.05 | 16.3 | 1.23 | 15%
DAC | 2.98 | 17.6 | 1.24 | 20%
Wavtokenizer | 2.18 | 32.7 | 2.50 | 12%
Ours | 2.99 | 15.0 | 1.24 | 44%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.497, + 0.502, + 0.887, + 0.556 + ], + "angle": 0, + "content": "Table 3. The music reconstruction and semantic performance comparison between the ALMTokensizer and previous audio tokenizers. MC denotes the music classification task. Evaluation on Musiccaps dataset." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.561, + 0.886, + 0.656 + ], + "angle": 0, + "content": "
Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | MC (↑)
BEATs | - | - | - | 54%
Wav2vec2 | - | - | - | 65%
Encodec | 4.04 | 34.8 | 1.26 | 45%
DAC | 4.06 | 35.9 | 1.28 | 48%
Wavtokenizer | 3.85 | 48.2 | 1.47 | 54%
Ours | 3.96 | 34.4 | 1.32 | 59%
" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.685, + 0.887, + 0.73 + ], + "angle": 0, + "content": "Audio Generation We also conduct audio generation tasks, including text-to-speech, text-to-sound, and text-to-music. Refer to Appendix D for more details." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.747, + 0.862, + 0.761 + ], + "angle": 0, + "content": "4.3. The Reconstruction and Semantic Performance" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.77, + 0.887, + 0.905 + ], + "angle": 0, + "content": "We first compare the reconstruction and semantic performance of ALMTokensizer with previous audio tokenizers. Table 1 presents the speech reconstruction and semantic results. We observe the following: (1) In terms of reconstruction, ALMTokensizer achieves impressive results in the low-bitrate setting. For example, compared with previous SOTA models, MimiCodec and Wavtokenizer, ALMTokensizer achieves better reconstruction performance at a lower bitrate. We also note that StableCodec performs well on UT-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.763, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.094, + 0.477, + 0.147 + ], + "angle": 0, + "content": "Table 4. The LM-based TTS and ASR results. The first three metrics are used for TTS, while the last one is used for ASR. GLM4-Voice (Zeng et al., 2024) is a single layer semantic tokenizer. Evaluation on LibriSpeech test clean set." + }, + { + "type": "table", + "bbox": [ + 0.089, + 0.149, + 0.48, + 0.254 + ], + "angle": 0, + "content": "
Models | WER (↓) | DNSMOS (↑) | UT-MOS (↑) | ASR (↓)
GLM4-voice | 9.9 | 3.96 | 3.79 | 16.3 ± 1.5
DAC | 24.5 | 3.14 | 2.06 | 58.4 ± 1.2
Encodec | 22.9 | 3.48 | 2.14 | 77.2 ± 2.3
StableCodec | 22.7 | 3.63 | 3.70 | 28.0 ± 1.9
Wavtokenizer | 18.5 | 3.72 | 3.58 | 45.6 ± 2.7
MimiCodec | 16.0 | 3.67 | 2.93 | 23.1 ± 1.5
Ours | 11.7 | 3.75 | 3.88 | 19.6 ± 1.8
" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.286, + 0.477, + 0.755 + ], + "angle": 0, + "content": "MOS. The main reason is that StableCodec has denoising capabilities, while the original audio includes some noise. This explains why StableCodec achieves good results on UTMOS but performs poorly on PESQ and STOI. (2) In terms of semantic information, ALMTokensizer demonstrates superior performance, e.g., ALMTokensizer outperforms previous SOTA models, such as Wavtokenizer and StableCodec \\(^{4}\\). Notably, in the emotion classification task, ALMTokensizer achieves performance comparable to previous SSL models, such as Hubert and WavLM. However, we also note that ALMTokensizer still lags behind these SSL models in ASR performance. We speculate that the inclusion of acoustic information may detract from ASR performance, despite ALMTokensizer containing rich semantic information. Table 2 and 3 show the sound and music experimental results. We can see that ALMTokensizer demonstrates strong reconstruction performance under the low-bitrate setting. Compared to WavTokenizer, the reconstruction performance shows significant improvement. Furthermore, we also note that sound and music are inherently more complex than speech, and encoding them at very low-bitrate remains a challenge. In terms of semantic information, ALMTokensizer significantly surpasses previous works, such as WavTokenizer and Encodec. In comparison with SSL models, BEATs (Chen et al., 2022b) and Wav2vec2-audioset version, ALMTokensizer shows comparable performance. We also perform the MUSHRA subjective test for the reconstruction performance. As shown in Table 7, we find that ALMTokensizer effectively maintains strong subjective reconstruction performance on speech, music, and audio, even with a very low-bitrate setting." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.771, + 0.439, + 0.785 + ], + "angle": 0, + "content": "4.4. Audio Understanding and Generation Results" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.794, + 0.476, + 0.87 + ], + "angle": 0, + "content": "Speech Understanding and Generation Tasks Table 4 shows the LM-based TTS and ASR results. For the TTS task, we mainly focus on robustness and speech quality. In terms of robustness, we can see that the GLM4-voice tokenizer (Zeng et al., 2024), MimiCodec, and the pro" + }, + { + "type": "image", + "bbox": [ + 0.531, + 0.085, + 0.852, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.284, + 0.88, + 0.298 + ], + "angle": 0, + "content": "Figure 3. The performance comparison with or without AR loss." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.33, + 0.888, + 0.601 + ], + "angle": 0, + "content": "posed ALMTokensizer bring better performance than others, highlighting the importance of semantic information for LM-based speech generation. Compared to previous audio codec tokenizers, ALMTokensizer brings significant improvement. In terms of generated speech quality, ALMTokensizer also shows great advantages, further demonstrating that the proposed tokenizer is more suitable for audio language modeling. Similarly, when we conduct the ASR task using discrete tokens as input, semantic information is also important. Traditional audio codec models perform poorly in this setting, such as DAC, Encodec, and WavTokenizer. StableCodec was fine-tuned by using a CTC head to predict the force-aligned phoneme tags from pre-bottleneck latents. MimiCodec distills the semantic information from WavLM. 
Thus, they have better performance than previous codec models. In ALMTokensizer, we propose a novel codec framework and training loss to better encode semantic information in the codec model." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.609, + 0.889, + 0.76 + ], + "angle": 0, + "content": "Sound/music Understanding and Generation Results We conduct text-to-sound, text-to-music, audio caption and music caption tasks within the same audio language model framework. The experimental results shown in Table 5 indicate that ALMTokensizer shows better performance in both audio caption and audio generation tasks, further demonstrating its advantages. We put more audio tokenizer reconstruction performance experiments on Appendix F, including evaluation on LibriTTS test set, length generalization, and compared to diffusion-based audio codec models." + }, + { + "type": "title", + "bbox": [ + 0.497, + 0.777, + 0.637, + 0.792 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.8, + 0.888, + 0.906 + ], + "angle": 0, + "content": "In order to gain a more comprehensive understanding of ALMTokensizer, we systematically compared each key component using a controlled experimental setup, employing identical architectures and hyperparameters across all trials. The Effectiveness of Query-based Audio Compression In this study, we propose a query-based audio compression strategy for compressing audio data in a very low-bitrate" + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.879, + 0.476, + 0.906 + ], + "angle": 0, + "content": "4StableCodec's feature dimension is 6, it is hard to apply it for down-streaming task by simple fine-tuning" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.763, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.094, + 0.887, + 0.121 + ], + "angle": 0, + "content": "Table 5. The LM-based sound, music understanding and generation. B1, B2, B3, RG, ME, CD, SP, and SD denote BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER, respectively. Evaluation on Audiocaps and Musiccaps datasets." + }, + { + "type": "table", + "bbox": [ + 0.104, + 0.121, + 0.87, + 0.274 + ], + "angle": 0, + "content": "
Models | Understanding: B1 (↑) | B2 (↑) | B3 (↑) | ME (↑) | RG (↑) | CD (↑) | SP (↑) | SD (↑) | Generation: FD (↓) | FAD (↓) | KL (↓)
Sound Task
Encodec | 0.25 | 0.15 | 0.08 | 0.11 | 0.24 | 0.57 | 0.14 | 0.35 | 10.03 | 8.22 | 1.73
DAC | 0.26 | 0.15 | 0.08 | 0.11 | 0.26 | 0.51 | 0.13 | 0.32 | 14.14 | 11.7 | 1.55
Wavtokenizer | 0.24 | 0.14 | 0.08 | 0.10 | 0.22 | 0.38 | 0.11 | 0.25 | 6.76 | 4.55 | 1.28
ALMTokensizer (Ours) | 0.28 | 0.17 | 0.11 | 0.12 | 0.24 | 0.60 | 0.15 | 0.37 | 4.11 | 6.16 | 0.55
Music Task
Encodec | 0.30 | 0.14 | 0.08 | 0.11 | 0.23 | 0.37 | 0.09 | 0.23 | 7.22 | 5.48 | 1.06
DAC | 0.29 | 0.14 | 0.08 | 0.11 | 0.23 | 0.37 | 0.09 | 0.23 | 12.89 | 8.36 | 1.68
Wavtokenizer | 0.19 | 0.06 | 0.02 | 0.06 | 0.13 | 0.06 | 0.05 | 0.05 | 4.39 | 11.93 | 0.88
ALMTokensizer (Ours) | 0.34 | 0.15 | 0.07 | 0.13 | 0.25 | 0.44 | 0.10 | 0.27 | 3.55 | 4.58 | 0.43
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.298, + 0.887, + 0.325 + ], + "angle": 0, + "content": "Table 6. Ablation study of codec framework, training loss, and training strategy. ASR and ER are used to evaluate the semantic information. The others are used to evaluate the reconstruction performance. Experiments conduct on VCTK dataset." + }, + { + "type": "table", + "bbox": [ + 0.102, + 0.325, + 0.87, + 0.527 + ], + "angle": 0, + "content": "
Setting | UTMOS (↑) | DNSMOS (↑) | VISQOL (↑) | PESQ (↑) | STOI (↑) | ASR (↓) | ER (↑)
ALMTokensizer | 3.76 | 3.64 | 3.78 | 2.0 | 0.81 | 18.3 | 29.0
Framework ablation
w/o the query-based framework | 2.49 | 3.13 | 3.37 | 1.58 | 0.77 | 34.5 | 22.6
w/o Three additional loss | 3.54 | 3.41 | 3.44 | 1.69 | 0.78 | 27.2 | 24.5
Training loss ablation
w/o semantic prior for VQ | 3.79 | 3.66 | 3.78 | 2.12 | 0.83 | 19.2 | 28.4
w/o MAE loss | 3.70 | 3.76 | 3.83 | 2.10 | 0.82 | 24.5 | 23.2
w/o AR loss | 3.72 | 3.81 | 3.80 | 2.08 | 0.82 | 18.8 | 30.2
Different Patchify module
use Linear-Patchify | 3.47 | 3.36 | 3.27 | 1.78 | 0.78 | 20.3 | 26.7
Training strategy ablation
w/o two-stage training | 3.60 | 3.39 | 3.24 | 1.55 | 0.74 | 22.8 | 25.9
" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.553, + 0.477, + 0.688 + ], + "angle": 0, + "content": "setting. To validate its effectiveness, we follow previous audio codec models, such as MimiCodec (Défossez et al., 2024). In the encoder part, we use a stride size of [8, 6, 5, 4, 2] to compress 1-second, \\(24\\mathrm{kHz}\\) audio into \\(12.5\\mathrm{Hz}\\), followed by applying 3 RVQ layers to quantize it. As shown in Table 6, using previous audio codec frameworks makes it difficult to maintain good reconstruction performance in very low-bitrate settings. As a result, the proposed query-based compression method is more effective in this setting." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.688, + 0.474, + 0.809 + ], + "angle": 0, + "content": "The Influence of Semantic Prior for VQ To explore the influence of semantic priors on the audio codec model, we conduct an experiment where we remove the semantic prior and instead train a learnable RVQ following Encodec. As shown in Table 6, we find that updating the RVQ layer improves reconstruction performance but reduces semantic information, demonstrating that integrating semantic priors into the VQ layer enhances semantic information." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.81, + 0.474, + 0.899 + ], + "angle": 0, + "content": "The Influence of MAE Loss We also conduct experiments to evaluate the effectiveness of the MAE loss. As shown in Table 6, we find that the MAE loss is crucial for enhancing the semantic information in the codec model. Although the MAE loss has a slight negative effect on reconstruction, it is a crucial factor in building a better audio tokenizer." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.553, + 0.887, + 0.658 + ], + "angle": 0, + "content": "The Influence of AR Loss From Table 6, we observe that adding the AR loss reduces reconstruction performance. In Figure 3, we compare token prediction accuracy and TTS performance with and without LM loss. We observe that using LM loss significantly improves token prediction accuracy, particularly for the second and third VQ layers, which shows the effectiveness of our motivation and solution." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.659, + 0.887, + 0.901 + ], + "angle": 0, + "content": "The Influence of Two-stage Training As Table 6 shows, the two-stage training strategy is crucial as it significantly improves reconstruction performance and semantic information in the codec model. The Influence of Patchify Module We investigate two types of Patchify modules: Encode-style and StableCodec-style. As shown in Table 6, using Encode-style Patchify modules yields better performance. One possible reason is that StableCodec-style Patchify modules (Parker et al., 2024) may depend on larger data and model parameters, as the original paper scales their model to 1B. In contrast, we use only four transformer layers to ensure a fair comparison with Encode-style modules. Due to page limitations, we defer the ablation study on the influence of window size \\( w \\) in query-based compression, codebook size, the influence of mask-rate, and model size on reconstruction to Appendix C." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.094, + 0.887, + 0.121 + ], + "angle": 0, + "content": "Table 7. The subjective reconstruction results using MUSHRA (comparative scoring of samples) of codec models on speech, sound and music. Bold for the best result and underline for the second-best result." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.127, + 0.845, + 0.294 + ], + "angle": 0, + "content": "
Models | FPS/TPS | CS/BR | Speech (↑) | Sound (↑) | Music (↑)
Speech
MimiCodec (3 RVQ) (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 65.61 ± 5.2 | - | -
MimiCodec (8 RVQ) (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 86.7 ± 2.3 | - | -
StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 81.7 ± 4.4 | - | -
SpeechTokenizer (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 73.7 ± 4.6 | - | -
Audio
Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 75.1 ± 3.9 | 77.2 ± 4.2 | 73.7 ± 4.6
DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 79.3 ± 4.2 | 71.3 ± 4.1 | 71.3 ± 4.1
Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 84.0 ± 2.1 | 63.1 ± 4.6 | 54.1 ± 5.4
Ours | 12.5/37.5 | 2048/0.41kbps | 84.8 ± 3.7 | 72.4 ± 4.7 | 69.0 ± 4.5
" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.319, + 0.194, + 0.333 + ], + "angle": 0, + "content": "4.6. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.343, + 0.475, + 0.417 + ], + "angle": 0, + "content": "In this section, we discuss two fundamental questions in audio tokenization. Question 1: Is a single quantization layer better than multiple quantization layers? Question 2: Does a low-bit rate with high reconstruction performance define a good audio tokenizer?" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.418, + 0.477, + 0.584 + ], + "angle": 0, + "content": "Question 1 Although WavTokenizer and StableCodec demonstrate the potential to build a low-bitrate audio codec tokenizer with a single quantization layer, they rely on a higher frame rate (e.g., 25 or \\(40\\mathrm{Hz}\\)). As shown in Figure 1, a lower frame rate (e.g., \\(12.5\\mathrm{Hz}\\)) is critical for improving training efficiency. Thanks to UniAudio (Yang et al., 2023c) and Moshi's (Défossez et al., 2024) audio language model framework, multiple quantization layers do not increase the sequence length. Therefore, multiple quantization layers present an effective approach for building a low-bitrate, semantically rich audio codec." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.585, + 0.478, + 0.825 + ], + "angle": 0, + "content": "Question 2 To address this question, we present two comparisons. First, as shown in Tables 4 and 1, StableCodec exhibits better reconstruction performance and a lower bit-rate compared to WavTokenizer. However, when applied to the text-to-speech generation task, WavTokenizer demonstrates better robustness. One possible reason for this is that StableCodec uses a large-scale codebook size (46,656), which may increase the modeling complexity. Second, although MimiCodec has a higher bit-rate and poorer reconstruction performance than StableCodec, it demonstrates more stable TTS generation performance and better ASR performance. This phenomenon further underscores the importance of semantic information. In summary, a good audio tokenizer for an audio language model should not only consider low-bitrate and reconstruction, but also account for the semantic information in the codec model." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.845, + 0.206, + 0.86 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.871, + 0.476, + 0.903 + ], + "angle": 0, + "content": "In this study, we present a low-bitrate, semantically rich audio codec tokenizer. Specifically, we propose a query-based" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.319, + 0.888, + 0.456 + ], + "angle": 0, + "content": "compression strategy to effectively compress the audio data into a low-bitrate format while incorporating more semantic information. Furthermore, we introduce several training losses to enhance semantic information, including MAE loss and AR loss. Extensive experiments demonstrate the effectiveness of ALMTokensizer. Within the same audio language modeling framework, ALMTokensizer exhibits superior performance in both understanding and generation tasks. We discuss the limitation of this study in Appendix I." 
+ }, + { + "type": "title", + "bbox": [ + 0.498, + 0.474, + 0.655, + 0.49 + ], + "angle": 0, + "content": "Ethical Statement" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.501, + 0.889, + 0.606 + ], + "angle": 0, + "content": "This paper presents an audio tokenizer for audio language models, which can be applied to various audio generation tasks, such as text-to-speech and text-to-music. There is potential for misuse in generating misinformation, deepfake audio, or other harmful content. We advocate for the development of a detection model to identify audio produced by the codec model and generated by other generative models." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.626, + 0.596, + 0.642 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.498, + 0.649, + 0.889, + 0.711 + ], + "angle": 0, + "content": "Agostinelli, A., Denk, T. I., Borsos, Z., Engel, J., Verzetti, M., Caillon, A., Huang, Q., Jansen, A., Roberts, A., Tagliasacchi, M., et al. Musicl: Generating music from text. arXiv preprint arXiv:2301.11325, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.498, + 0.719, + 0.889, + 0.782 + ], + "angle": 0, + "content": "Ai, Y., Jiang, X.-H., Lu, Y.-X., Du, H.-P., and Ling, Z.-H. Apocodec: A neural audio codec with parallel amplitude and phase spectrum encoding and decoding. arXiv preprint arXiv:2402.10533, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.498, + 0.79, + 0.889, + 0.851 + ], + "angle": 0, + "content": "Baevski, A., Zhou, Y., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in neural information processing systems, 33:12449-12460, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.498, + 0.861, + 0.889, + 0.906 + ], + "angle": 0, + "content": "Banerjee, A. and Arora, V. wav2tok: Deep sequence tokenizer for audio retrieval. In The Eleventh International Conference on Learning Representations, 2022." + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.649, + 0.889, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.072 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.146 + ], + "angle": 0, + "content": "Bertin-Mahieux, T., Ellis, D. P., Whitman, B., and Lamere, P. The million song dataset. In Proceedings of the 12th International Conference on Music Information Retrieval (ISMIR 2011), 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.155, + 0.476, + 0.233 + ], + "angle": 0, + "content": "Borsos, Z., Marinier, R., Vincent, D., Kharitonov, E., Pietquin, O., Sharifi, M., Roblek, D., Teboul, O., Grangier, D., Tagliasacchi, M., et al. Audiolm: a language modeling approach to audio generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.241, + 0.476, + 0.302 + ], + "angle": 0, + "content": "Borsos, Z., Sharifi, M., Vincent, D., Kharitonov, E., Zeghidour, N., and Tagliasacchi, M. Soundstorm: Efficient parallel audio generation. arXiv preprint arXiv:2305.09636, 2023b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.311, + 0.476, + 0.387 + ], + "angle": 0, + "content": "Chen, S., Wang, C., Chen, Z., Wu, Y., Liu, S., Chen, Z., Li, J., Kanda, N., Yoshioka, T., Xiao, X., et al. Wavlm: Large-scale self-supervised pre-training for full stack speech processing. IEEE Journal of Selected Topics in Signal Processing, 16(6):1505-1518, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.397, + 0.476, + 0.443 + ], + "angle": 0, + "content": "Chen, S., Wu, Y., Wang, C., Liu, S., Tompkins, D., Chen, Z., and Wei, F. Beats: Audio pre-training with acoustic tokenizers. arXiv preprint arXiv:2212.09058, 2022b." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.453, + 0.476, + 0.499 + ], + "angle": 0, + "content": "Copet, J., Kreuk, F., Gat, I., Remez, T., Kant, D., Synnaeve, G., Adi, Y., and Defossez, A. Simple and controllable music generation. arXiv preprint arXiv:2306.05284, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.508, + 0.476, + 0.599 + ], + "angle": 0, + "content": "Costantini, G., Iaderola, I., Paoloni, A., Todisco, M., et al. Emovo corpus: an italian emotional speech database. In Proceedings of the ninth international conference on language resources and evaluation (LREC'14), pp. 3501-3504. European Language Resources Association (ELRA), 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.609, + 0.476, + 0.654 + ], + "angle": 0, + "content": "Défossez, A., Copet, J., Synnaeve, G., and Adi, Y. High fidelity neural audio compression. arXiv preprint arXiv:2210.13438, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.664, + 0.476, + 0.724 + ], + "angle": 0, + "content": "Défossez, A., Mazaré, L., Orsini, M., Royer, A., Pérez, P., Jégou, H., Grave, E., and Zeghidour, N. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.734, + 0.476, + 0.78 + ], + "angle": 0, + "content": "Doh, S., Choi, K., Lee, J., and Nam, J. Lp-musiccaps: Llm-based pseudo music captioning. arXiv preprint arXiv:2307.16372, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.79, + 0.476, + 0.851 + ], + "angle": 0, + "content": "Drossos, K., Lipping, S., and Virtanen, T. Clotho: An audio captioning dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 736-740. IEEE, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.861, + 0.476, + 0.907 + ], + "angle": 0, + "content": "Du, Z., Chen, Q., Zhang, S., Hu, K., Lu, H., Yang, Y., Hu, H., Zheng, S., Gu, Y., Ma, Z., et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.085, + 0.885, + 0.115 + ], + "angle": 0, + "content": "based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.126, + 0.887, + 0.186 + ], + "angle": 0, + "content": "Hao, H., Zhou, L., Liu, S., Li, J., Hu, S., Wang, R., and Wei, F. Boosting large language model for speech synthesis: An empirical study. arXiv preprint arXiv:2401.00246, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.198, + 0.887, + 0.259 + ], + "angle": 0, + "content": "He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 16000-16009, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.269, + 0.887, + 0.315 + ], + "angle": 0, + "content": "Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.326, + 0.887, + 0.416 + ], + "angle": 0, + "content": "Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.427, + 0.887, + 0.488 + ], + "angle": 0, + "content": "Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., and Chen, W. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.499, + 0.887, + 0.56 + ], + "angle": 0, + "content": "Huang, P.-Y., Xu, H., Li, J., Baevski, A., Auli, M., Galuba, W., Metze, F., and Feichtenhofer, C. Masked autoencoders that listen. Advances in Neural Information Processing Systems, 35:28708-28720, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.57, + 0.887, + 0.645 + ], + "angle": 0, + "content": "Ji, S., Jiang, Z., Wang, W., Chen, Y., Fang, M., Zuo, J., Yang, Q., Cheng, X., Wang, Z., Li, R., et al. Wavtokenizer: an efficient acoustic discrete codec tokenizer for audio language modeling. arXiv preprint arXiv:2408.16532, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.657, + 0.887, + 0.718 + ], + "angle": 0, + "content": "Ju, Z., Wang, Y., Shen, K., Tan, X., Xin, D., Yang, D., Liu, Y., Leng, Y., Song, K., Tang, S., et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. arXiv preprint arXiv:2403.03100, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.728, + 0.887, + 0.819 + ], + "angle": 0, + "content": "Kang, W., Yang, X., Yao, Z., Kuang, F., Yang, Y., Guo, L., Lin, L., and Povey, D. Libriheavy: a 50,000 hours asr corpus with punctuation casing and context. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 10991-10995. IEEE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.83, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Kharitonov, E., Vincent, D., Borsos, Z., Marinier, R., Girgin, S., Pietquin, O., Sharifi, M., Tagliasacchi, M., and Zeghidour, N. Speak, read and prompt: High-fidelity text-to-speech with minimal supervision. arXiv preprint arXiv:2302.03540, 2023." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.887, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.072 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.177 + ], + "angle": 0, + "content": "Kim, C. D., Kim, B., Lee, H., and Kim, G. Audiocaps: Generating captions for audios in the wild. 
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 119-132, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.185, + 0.479, + 0.247 + ], + "angle": 0, + "content": "Kreuk, F., Synnaeve, G., Polyak, A., Singer, U., Défossez, A., Copet, J., Parikh, D., Taigman, Y., and Adi, Y. Audiogen: Textually guided audio generation. arXiv preprint arXiv:2209.15352, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.255, + 0.478, + 0.332 + ], + "angle": 0, + "content": "Kumar, R., Seetharaman, P., Luebs, A., Kumar, I., and Kumar, K. High-fidelity audio compression with improved RVQGAN. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=qjnl1QUUnFA." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.341, + 0.479, + 0.446 + ], + "angle": 0, + "content": "La Quatra, M., Koudounas, A., Vaiani, L., Baralis, E., Cagliero, L., Garza, P., and Siniscalchi, S. M. Benchmarking representations for speech, music, and acoustic events. In 2024 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICAS-SPW), pp. 505-509, 2024. doi: 10.1109/ICASSPW62465.2024.10625960." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.455, + 0.478, + 0.517 + ], + "angle": 0, + "content": "Li, H., Xue, L., Guo, H., Zhu, X., Lv, Y., Xie, L., Chen, Y., Yin, H., and Li, Z. Single-codec: Single-codebook speech codec towards high-performance speech generation. arXiv preprint arXiv:2406.07422, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.526, + 0.476, + 0.587 + ], + "angle": 0, + "content": "Li, J., Li, D., Savarese, S., and Hoi, S. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730-19742. PMLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.596, + 0.476, + 0.641 + ], + "angle": 0, + "content": "Lipman, Y., Chen, R. T., Ben-Hamu, H., Nickel, M., and Le, M. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.65, + 0.478, + 0.711 + ], + "angle": 0, + "content": "Liu, H., Xu, X., Yuan, Y., Wu, M., Wang, W., and Plumbley, M. D. Semanticodec: An ultra low bitrate semantic audio codec for general sound. arXiv preprint arXiv:2405.00233, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.721, + 0.478, + 0.766 + ], + "angle": 0, + "content": "Lostanlen, V. and Cella, C.-E. Deep convolutional networks on the pitch spiral for musical instrument recognition. arXiv preprint arXiv:1605.06644, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.775, + 0.478, + 0.851 + ], + "angle": 0, + "content": "Mei, X., Meng, C., Liu, H., Kong, Q., Ko, T., Zhao, C., Plumbley, M. D., Zou, Y., and Wang, W. Wavcaps: A chatgpt-assisted weakly-labelled audio captioning dataset for audio-language multimodal research. arXiv preprint arXiv:2303.17395, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.861, + 0.478, + 0.907 + ], + "angle": 0, + "content": "Nguyen, T. A., Muller, B., Yu, B., Costa-Jussa, M. R., Elbayad, M., Popuri, S., Ropers, C., Duquenne, P.-A., Algayres, R., Mavlyutov, R., et al. 
Spirit-lm: Interleaved" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.085, + 0.888, + 0.129 + ], + "angle": 0, + "content": "spoken and written language model. Transactions of the Association for Computational Linguistics, 13:30-52, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.144, + 0.887, + 0.173 + ], + "angle": 0, + "content": "OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2204.06125, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.187, + 0.889, + 0.261 + ], + "angle": 0, + "content": "Panayotov, V., Chen, G., Povey, D., and Khudanpur, S. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.275, + 0.889, + 0.335 + ], + "angle": 0, + "content": "Parker, J. D., Smirnov, A., Pons, J., Carr, C., Zukowski, Z., Evans, Z., and Liu, X. Scaling transformers for low-bitrate high-quality speech coding. arXiv preprint arXiv:2411.19842, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.348, + 0.889, + 0.394 + ], + "angle": 0, + "content": "Piczak, K. J. Esc: Dataset for environmental sound classification. In Proceedings of the 23rd ACM international conference on Multimedia, pp. 1015-1018, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.407, + 0.889, + 0.452 + ], + "angle": 0, + "content": "Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. arXiv preprint arXiv:2012.03411, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.464, + 0.889, + 0.541 + ], + "angle": 0, + "content": "Reddy, C. K., Gopal, V., and Cutler, R. Dnsmos p. 835: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 886-890. IEEE, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.553, + 0.889, + 0.613 + ], + "angle": 0, + "content": "Saeki, T., Xin, D., Nakata, W., Koriyama, T., Takamichi, S., and Saruwatari, H. Utmos: Utokyo-sarulab system for voicemos challenge 2022. arXiv preprint arXiv:2204.02152, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.626, + 0.889, + 0.672 + ], + "angle": 0, + "content": "Siuzdak, H. Vocos: Closing the gap between time-domain and fourier-based neural vocoders for high-quality audio synthesis. arXiv preprint arXiv:2306.00814, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.684, + 0.889, + 0.774 + ], + "angle": 0, + "content": "Tang, C., Yu, W., Sun, G., Chen, X., Tan, T., Li, W., Lu, L., MA, Z., and Zhang, C. SALMONN: Towards generic hearing abilities for large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=14rn7HpKVk." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.788, + 0.889, + 0.833 + ], + "angle": 0, + "content": "van Niekerk, B., Zäïdi, J., Carbonneau, M.-A., and Kamper, H. Spoken-term discovery using discrete speech units. arXiv preprint arXiv:2408.14390, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.846, + 0.889, + 0.907 + ], + "angle": 0, + "content": "Veaux, C., Yamagishi, J., MacDonald, K., et al. 
Cstr vctk corpus: English multi-speaker corpus for cstr voice cloning toolkit. University of Edinburgh. The Centre for Speech Technology Research (CSTR), 6:15, 2017." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.889, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.072 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.146 + ], + "angle": 0, + "content": "Wang, C., Chen, S., Wu, Y., Zhang, Z., Zhou, L., Liu, S., Chen, Z., Liu, Y., Wang, H., Li, J., et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.158, + 0.476, + 0.218 + ], + "angle": 0, + "content": "Wang, H., Suri, S., Ren, Y., Chen, H., and Shrivastava, A. Larp: Tokenizing videos with a learned autoregressive generative prior. arXiv preprint arXiv:2410.21264, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.231, + 0.476, + 0.321 + ], + "angle": 0, + "content": "Wang, Y., Chen, H., Yang, D., Yu, J., Weng, C., Wu, Z., and Meng, H. Consistent and relevant: Rethink the query embedding in general sound separation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 961-965. IEEE, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.334, + 0.476, + 0.395 + ], + "angle": 0, + "content": "Wang, Y., Chen, H., Yang, D., Li, W., Luo, D., Li, G., Yang, S., Wu, Z., Meng, H., and Wu, X. Unisep: Universal target audio separation with language models at scale. arXiv preprint arXiv:2503.23762, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.407, + 0.476, + 0.453 + ], + "angle": 0, + "content": "Wu, H., Kanda, N., Eskimez, S. E., and Li, J. Ts3-codec: Transformer-based simple streaming single codec. arXiv preprint arXiv:2411.18803, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.465, + 0.476, + 0.526 + ], + "angle": 0, + "content": "Yang, D., Liu, S., Huang, R., Lei, G., Weng, C., Meng, H., and Yu, D. Instructts: Modelling expressive tts in discrete latent space with natural language style prompt. arXiv preprint arXiv:2301.13662, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.538, + 0.476, + 0.598 + ], + "angle": 0, + "content": "Yang, D., Liu, S., Huang, R., Tian, J., Weng, C., and Zou, Y. Hifi-codec: Group-residual vector quantization for high fidelity audio codec. arXiv preprint arXiv:2305.02765, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.611, + 0.476, + 0.672 + ], + "angle": 0, + "content": "Yang, D., Tian, J., Tan, X., Huang, R., Liu, S., Chang, X., Shi, J., Zhao, S., Bian, J., Wu, X., et al. Uniaudio: An audio foundation model toward universal audio generation. arXiv preprint arXiv:2310.00704, 2023c." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.684, + 0.476, + 0.745 + ], + "angle": 0, + "content": "Yang, D., Yu, J., Wang, H., Wang, W., Weng, C., Zou, Y., and Yu, D. Diffsound: Discrete diffusion model for text-to-sound generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023d." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.758, + 0.476, + 0.817 + ], + "angle": 0, + "content": "Yang, D., Guo, H., Wang, Y., Huang, R., Li, X., Tan, X., Wu, X., and Meng, H. Uniaudio 1.5: Large language model-driven audio codec is a few-shot audio task learner. arXiv preprint arXiv:2406.10056, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.83, + 0.476, + 0.905 + ], + "angle": 0, + "content": "Yang, D., Huang, R., Wang, Y., Guo, H., Chong, D., Liu, S., Wu, X., and Meng, H. Simplespeech 2: Towards simple and efficient text-to-speech with flow-based scalar latent transformer diffusion models. arXiv preprint arXiv:2408.13893, 2024b." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.887, + 0.146 + ], + "angle": 0, + "content": "Yang, D., Wang, D., Guo, H., Chen, X., Wu, X., and Meng, H. Simplespeech: Towards simple and efficient text-to-speech with scalar latent transformer diffusion models. arXiv preprint arXiv:2406.02328, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.156, + 0.887, + 0.216 + ], + "angle": 0, + "content": "Yang, S.-w., Chi, P.-H., Chuang, Y.-S., Lai, C.-I. J., Lakhotia, K., Lin, Y. Y., Liu, A. T., Shi, J., Chang, X., Lin, G.-T., et al. Superb: Speech processing universal performance benchmark. arXiv preprint arXiv:2105.01051, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.226, + 0.887, + 0.285 + ], + "angle": 0, + "content": "Yu, Q., Weber, M., Deng, X., Shen, X., Cremers, D., and Chen, L.-C. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.297, + 0.887, + 0.357 + ], + "angle": 0, + "content": "Zeghidour, N., Luebs, A., Omran, A., Skoglund, J., and Tagliasacchi, M. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30:495-507, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.367, + 0.887, + 0.426 + ], + "angle": 0, + "content": "Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.438, + 0.887, + 0.498 + ], + "angle": 0, + "content": "Zeng, A., Du, Z., Liu, M., Wang, K., Jiang, S., Zhao, L., Dong, Y., and Tang, J. Glm-4-voice: Towards intelligent and human-like end-to-end spoken chatbot. arXiv preprint arXiv:2412.02612, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.508, + 0.887, + 0.553 + ], + "angle": 0, + "content": "Zhang, X., Zhang, D., Li, S., Zhou, Y., and Qiu, X. Speechtokenizer: Unified speech tokenizer for speech large language models. arXiv preprint arXiv:2308.16692, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.564, + 0.887, + 0.608 + ], + "angle": 0, + "content": "Zhu, L., Wei, F., Lu, Y., and Chen, D. Scaling the codebook size of vqgan to 100,000 with a utilization rate of \\(99\\%\\). arXiv preprint arXiv:2406.11837, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.887, + 0.608 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.765, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "image", + "bbox": [ + 0.156, + 0.085, + 0.481, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.081, + 0.825, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.407, + 0.89, + 0.451 + ], + "angle": 0, + "content": "Figure 4. The left diagram illustrates the framework of the audio language model, which includes a pre-trained LLM, a LoRA module, and a depth transformer. The audio language model can process both text and audio streaming inputs and generate corresponding text and audio outputs. The right diagram provides details of hierarchical audio modeling." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.513, + 0.516, + 0.531 + ], + "angle": 0, + "content": "A. The details of audio language model framework" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.539, + 0.889, + 0.629 + ], + "angle": 0, + "content": "In this section, we provide details of the audio language model. We follow the framework of UniAudio (Yang et al., 2023c) and Moshi (Défossez et al., 2024), which combines a pre-trained LLM with a smaller Transformer model to predict audio tokens in a hierarchical manner. In their original paper, both the LLM and the small Transformer are updated during the training process. Due to resource limitations, and following (Hao et al., 2023), we incorporate LoRA (Hu et al., 2021) into the LLM model. For the LLM model, we use the LLAMA3.2 1B version. During training, we update only the LoRA module and the small Transformer." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.637, + 0.888, + 0.669 + ], + "angle": 0, + "content": "LORA setting For the LoRA module, we add LoRA parameters to the self-attention and linear layers. We set \\( lora_{r} = 32 \\) and \\( lora_{alpha} = 16 \\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.675, + 0.888, + 0.704 + ], + "angle": 0, + "content": "Depth Transformer setting For the depth transformer, we use 6 self-attention layer. We set the attention head number as 32. The attention dimension is the same as the LLAMA 3.2 1B." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.725, + 0.864, + 0.742 + ], + "angle": 0, + "content": "B. The details of the influence of bitrate and semantic information for audio language model." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.751, + 0.888, + 0.798 + ], + "angle": 0, + "content": "In this section, we provide details of the validation experiments to explore the influence of bitrate and semantic information on audio language models. Following AudioLM (Borsos et al., 2023a), we construct an audio token pre-training task similar to text pre-training, where the model is tasked with predicting the next audio token based on the previous token sequence." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.813, + 0.219, + 0.828 + ], + "angle": 0, + "content": "B.1. 
Training data" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.836, + 0.855, + 0.852 + ], + "angle": 0, + "content": "We conduct the experiments on 2000 hours speech data, these data is selected from MLS dataset (Pratap et al., 2020)." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.868, + 0.187, + 0.882 + ], + "angle": 0, + "content": "B.2. Test data" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.891, + 0.365, + 0.906 + ], + "angle": 0, + "content": "We evaluate on LibriSpeech test clean set." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.238, + 0.094, + 0.734, + 0.107 + ], + "angle": 0, + "content": "Table 8. The reconstruction performance of different frame rate of audio tokenizers." + }, + { + "type": "table", + "bbox": [ + 0.162, + 0.107, + 0.812, + 0.174 + ], + "angle": 0, + "content": "
Version | Bitrate (↓) | FPS (↓) | codebook size | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑)
50hz | 1650bps | 50 | 2048 | 2.22 | 3.69 | 3.63 | 0.86
25hz | 825bps | 25 | 2048 | 2.07 | 3.56 | 3.61 | 0.83
12.5hz | 412.5bps | 12.5 | 2048 | 1.58 | 2.49 | 3.37 | 0.77
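The bitrate column follows directly from the frame rate, the number of RVQ layers, and the codebook size (each version uses three RVQ layers with a 2,048-entry codebook, i.e. 11 bits per token); a small sanity-check helper:

```python
# Sanity check for the bitrates reported above:
# bitrate = frame rate x number of RVQ layers x bits per token.
import math

def codec_bitrate_bps(frame_rate_hz, num_rvq_layers=3, codebook_size=2048):
    bits_per_token = math.log2(codebook_size)  # 11 bits for a 2,048-entry codebook
    return frame_rate_hz * num_rvq_layers * bits_per_token

print(codec_bitrate_bps(50))    # 1650.0 -> the 50hz row
print(codec_bitrate_bps(25))    # 825.0  -> the 25hz row
print(codec_bitrate_bps(12.5))  # 412.5  -> the 12.5hz row
```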
" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.293, + 0.206, + 0.306 + ], + "angle": 0, + "content": "B.3. Framework" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.317, + 0.778, + 0.332 + ], + "angle": 0, + "content": "We use the same framework as described in Section A; the difference is that we do not use text streaming." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.348, + 0.351, + 0.364 + ], + "angle": 0, + "content": "B.4. Three Types of Audio Tokenizers" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.372, + 0.888, + 0.403 + ], + "angle": 0, + "content": "Following the structure of MimiCodec (Défossez et al., 2024), we train three versions of the audio codec tokenizer. All of the audio codec models are trained on \\(24\\mathrm{kHz}\\) speech data. We train three versions of the audio codec models, as follows:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.409, + 0.887, + 0.44 + ], + "angle": 0, + "content": "(V1) We set the down-sampling rate to [2, 5, 6, 8], resulting in a \\(50\\mathrm{Hz}\\) frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 1.65 kbps." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.447, + 0.885, + 0.479 + ], + "angle": 0, + "content": "(V2) We set the down-sampling rate to [4, 5, 6, 8], resulting in a \\(25\\mathrm{Hz}\\) frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 825 bps." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.484, + 0.885, + 0.517 + ], + "angle": 0, + "content": "(V3) We set the down-sampling rate to [2, 4, 5, 6, 8], resulting in a \\(12.5\\mathrm{Hz}\\) frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 412.5 bps." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.409, + 0.887, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.522, + 0.888, + 0.569 + ], + "angle": 0, + "content": "Note that the original MimiCodec is trained with distillation loss from WavLM; we do not add this loss during the training of our audio tokenizer. Therefore, these three audio tokenizers do not include any semantic information. Table 8 shows the reconstruction performance of the three audio tokenizers." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.585, + 0.26, + 0.599 + ], + "angle": 0, + "content": "B.5. Semantic Tokenizer" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.608, + 0.888, + 0.683 + ], + "angle": 0, + "content": "The previous three audio codec tokenizers do not consider semantic information. To evaluate the importance of semantic information, we follow WhisperSpeech5 to build a Whisper-based semantic tokenizer. Specifically, we follow the training code of WhisperSpeech, using two down-sampling layers to compress the Whisper encoder's features into a \\(12.5\\mathrm{Hz}\\) frame rate, and then we add three RVQ layers to quantize them. Thus, this semantic tokenizer has the same bitrate as the V3 audio tokenizer." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.7, + 0.255, + 0.714 + ], + "angle": 0, + "content": "B.6. 
Evaluation metrics" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.724, + 0.542, + 0.74 + ], + "angle": 0, + "content": "We evaluate the pre-training performance from the following aspects:" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.746, + 0.888, + 0.793 + ], + "angle": 0, + "content": "Training efficiency: As is well known, the space complexity of a transformer is \\( O(T^2) \\), where \\( T \\) is the sequence length. A low-bitrate audio tokenizer can compress the audio signal into a few token sequences, thereby improving training efficiency. For all experiments, we use the same GPU machine to train the model and record the statistical training duration." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.799, + 0.888, + 0.845 + ], + "angle": 0, + "content": "Inference efficiency: Similarly, a low-bitrate audio tokenizer can improve inference efficiency, as it requires fewer inference steps. We use the Real-Time Factor (RTF) to assess inference efficiency. Note that for all experiments, we do not use any inference optimization tricks, such as KV cache." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.851, + 0.888, + 0.883 + ], + "angle": 0, + "content": "Validation loss and perplexity: Following text LLMs (OpenAI, 2023), we use validation loss and perplexity to evaluate model performance." + }, + { + "type": "page_footnote", + "bbox": [ + 0.106, + 0.891, + 0.416, + 0.907 + ], + "angle": 0, + "content": "5https://github.com/WhisperSpeech/WhisperSpeech" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.088, + 0.703, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.376, + 0.737, + 0.391 + ], + "angle": 0, + "content": "Figure 5. The performance comparison with different window size during inference." + }, + { + "type": "table_caption", + "bbox": [ + 0.272, + 0.424, + 0.7, + 0.437 + ], + "angle": 0, + "content": "Table 9. The influence of codebook size for reconstruction performance." + }, + { + "type": "table", + "bbox": [ + 0.142, + 0.437, + 0.832, + 0.503 + ], + "angle": 0, + "content": "
Codebook Size | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | STFT loss (↓) | Token utilization (↑)
2048 | 2.0 | 3.76 | 3.78 | 0.81 | 1.20 | 100%
1024 | 1.83 | 3.66 | 3.65 | 0.80 | 1.14 | 100%
512 | 1.69 | 3.64 | 3.58 | 0.792 | 1.18 | 100%
" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.535, + 0.24, + 0.552 + ], + "angle": 0, + "content": "C. Ablation study" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.561, + 0.46, + 0.575 + ], + "angle": 0, + "content": "C.1. The influence of window size for ALMTokenizer" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.584, + 0.888, + 0.66 + ], + "angle": 0, + "content": "As discussed in the previous section, the proposed ALMTokensizer supports a dynamic compression rate by changing the window size \\( w \\). Figure 5 shows the comparison of reconstruction performance with different window sizes. We observe that using a smaller window size results in better reconstruction performance, but it also increases the bitrate. For example, if the window size is 2, the bitrate is 1237.5bps, window size is 6, the bitrate is 412.5. It also shows the advantages of proposed method: we can dynamically change the frame rate during the inference by setting different window size." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.677, + 0.336, + 0.691 + ], + "angle": 0, + "content": "C.2. The influence of codebook size" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.7, + 0.889, + 0.746 + ], + "angle": 0, + "content": "We explore three different codebook sizes: 512, 1024, and 2048. To align with the setting of MimiCodec (Défossez et al., 2024), we set the max codebook size as 2048. The results, as shown in Table 9, are presented. We observe that scaling the codebook size improves reconstruction performance. Furthermore, we also find that almost all tokens have been used." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.762, + 0.534, + 0.777 + ], + "angle": 0, + "content": "C.3. The influence of model size for reconstruction performance" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.786, + 0.889, + 0.906 + ], + "angle": 0, + "content": "To explore the influence of model size on reconstruction performance, we set up two configurations: (1) We use 24 self-attention layers for both the transformer encoder and transformer decoder, resulting in 174M parameters. (2) We use 12 self-attention layers for both the transformer encoder and transformer decoder, resulting in 87M parameters. In both settings, we keep the Patchify module the same size, as it consists of several convolutional layers, and its total parameters are small. The experimental results, as shown in Table 10, indicate that using a larger model can improve reconstruction but also increases computational resource consumption (higher RTF). Previous work, StableCodec (Parker et al., 2024), shows that scaling the codec model to 1B parameters can lead to better performance. Due to computational resource limitations, we leave scaling to a larger model size for future work." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.292, + 0.094, + 0.68, + 0.107 + ], + "angle": 0, + "content": "Table 10. The influence of model for reconstruction performance." + }, + { + "type": "table", + "bbox": [ + 0.172, + 0.107, + 0.803, + 0.161 + ], + "angle": 0, + "content": "
Setting | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | Model size (↓) | RTF (↓)
24 attention layer | 2.0 | 3.76 | 3.78 | 0.81 | 174 | 0.031
12 attention layer | 1.87 | 3.57 | 3.70 | 0.79 | 87 | 0.019
" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.184, + 0.4, + 0.198 + ], + "angle": 0, + "content": "C.4. The influence of mask-rate in MAE loss" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.207, + 0.888, + 0.253 + ], + "angle": 0, + "content": "Inspired by MAE(He et al., 2022), we tested three groups of mask rates ranges: (10–20%), (20–30%), and (30–40%). The experiments as following Table shows. Results indicate that higher rates (30–40%) benefit semantics but harm reconstruction, leading us to adopt an intermediate range (20–30%)." + }, + { + "type": "table_caption", + "bbox": [ + 0.333, + 0.276, + 0.639, + 0.287 + ], + "angle": 0, + "content": "Table 11. The influence of mask-rate for MAE loss." + }, + { + "type": "table", + "bbox": [ + 0.194, + 0.287, + 0.779, + 0.352 + ], + "angle": 0, + "content": "
mask rate range | UTMOS | DNSMOS | VISQOL | PESQ | STOI | ASR | ER
10-20% | 3.77 | 3.62 | 3.80 | 2.0 | 0.81 | 18.7 | 27.7
20-30% | 3.76 | 3.64 | 3.78 | 2.0 | 0.81 | 18.3 | 29.0
30-40% | 3.36 | 3.06 | 3.31 | 1.58 | 0.77 | 18.1 | 29.6
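As a rough illustration of the masking step behind the MAE loss, the sketch below replaces a random subset of frame embeddings with a learned mask token; per-frame Bernoulli masking at a fixed rate is a simplifying assumption, since the mask rate is sampled from the ranges compared above.

```python
# A minimal sketch of frame masking for the MAE loss. The MAE decoder is then
# trained to reconstruct the original frame embeddings from the partially
# masked sequence with an L1 loss, as in the pseudocode of Listing 1.
import torch

def apply_frame_mask(frames, mask_token, mask_rate=0.25):
    # frames: (batch, T, dim) frame embeddings; mask_token: (dim,) learned vector.
    batch, num_frames, _ = frames.shape
    mask = torch.rand(batch, num_frames, device=frames.device) < mask_rate
    masked_frames = torch.where(mask.unsqueeze(-1), mask_token.view(1, 1, -1), frames)
    return masked_frames, mask
```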
" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.385, + 0.206, + 0.4 + ], + "angle": 0, + "content": "D. Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.411, + 0.886, + 0.441 + ], + "angle": 0, + "content": "We evaluate the performance of the previous SOTA audio tokenizers and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.457, + 0.275, + 0.47 + ], + "angle": 0, + "content": "D.1. Audio Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.481, + 0.886, + 0.511 + ], + "angle": 0, + "content": "For speech data, we use DNS-MOS (Reddy et al., 2022), UT-MOS (Saeki et al., 2022), PESQ, STOI (Short-Time Objective Intelligibility), VISQOL (speech version), and STFT loss as metrics." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.519, + 0.888, + 0.579 + ], + "angle": 0, + "content": "For sound and music data, we use VISQOL (audio version), STFT loss, and Mel loss. Furthermore, following (Kumar et al., 2023), we conduct the MUSHRA subjective test for speech, sound, and music. Specifically, we hire 10 audio-related researchers to conduct the MOS evaluation. We ask the listeners to rate each audio, with scores ranging from 0 to 100. Refer to D.5 for the details." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.587, + 0.888, + 0.648 + ], + "angle": 0, + "content": "Evaluation Datasets: For speech data, we evaluate on a subset of VCTK (Veaux et al., 2017) (200 speech utterances) and a subset of the LibriTTS test clean set (Zen et al., 2019) (400 speech utterances). For sound data, we evaluate on a subset of the AudioCaps validation set (Kim et al., 2019) (200 sound utterances). For music data, we evaluate on a subset of the MusicCaps (Agostinelli et al., 2023) dataset (200 music utterances)." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.664, + 0.323, + 0.677 + ], + "angle": 0, + "content": "D.2. Audio Semantic Information" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.687, + 0.888, + 0.764 + ], + "angle": 0, + "content": "Previous SSL models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a), have shown that semantic-rich representations can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Inspired by these works, we propose evaluating the performance of the audio tokenizer for downstream recognition tasks. We use the quantized features of the audio tokenizer as the input for downstream tasks. We follow two popular benchmarks: SUPERB (Yang et al., 2021) and ARCH (La Quatra et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.77, + 0.888, + 0.831 + ], + "angle": 0, + "content": "For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For the ASR task, we train on the LibriSpeech train-100 set and evaluate on the LibriSpeech test clean set. For the EC task, we follow ARCH (La Quatra et al., 2024) to split the training and test sets." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.838, + 0.888, + 0.884 + ], + "angle": 0, + "content": "For sound data, we conduct the sound classification task on the ESC-50 dataset (Piczak, 2015). For music data, we conduct the music classification task on the Medley-Solos-DB dataset (Lostanlen & Cella, 2016). 
For both tasks, we follow the ARCH benchmarking settings to split the training and test sets." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.891, + 0.888, + 0.907 + ], + "angle": 0, + "content": "For all experiments, we train for 10 epochs with the same learning rate and batch size. For the automatic speech recognition" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.834, + 0.1 + ], + "angle": 0, + "content": "task, we use word error rate (WER) as the metric. For the other classification tasks, we use accuracy as the metric." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.117, + 0.35, + 0.133 + ], + "angle": 0, + "content": "D.3. LM-based Audio Understanding" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.141, + 0.888, + 0.217 + ], + "angle": 0, + "content": "Overview To further validate whether the audio tokenizer is suitable for building an audio language model, we propose conducting an audio understanding task using discrete tokens as input. We conduct three tasks: automatic speech recognition (ASR), audio captioning, and music captioning. We use the framework introduced in Section A. For audio data, we use the audio tokenizer to encode it as discrete tokens; for text data, we use the BPE tokenizer of LLAMA 3.2. We construct the sequence as [audio token, text token], then the model is asked to predict the text token based on the previous audio token." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.224, + 0.888, + 0.315 + ], + "angle": 0, + "content": "Training Data For the ASR task, we select 2,000 hours of LibriHeavy speech data (Kang et al., 2024). For the audio captioning tasks, we use AudioCaps (Kim et al., 2019) and BBC sound effects (Mei et al., 2023). For the BBC sound effects, we cut off the first 10 seconds of audio if the utterance duration is greater than 10 seconds. Finally, we obtain about 500 hours of sound data. For the music captioning task, we use a subset of the Million Song dataset. We cut off the first 10 seconds of music data for each utterance, which results in about 500 hours of music data. For the corresponding captions, we use LPMusicCaps (Doh et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.322, + 0.887, + 0.368 + ], + "angle": 0, + "content": "Test Data For the ASR task, we evaluate on the LibriSpeech test clean set. For the audio captioning task, we evaluate on the AudioCaps dataset (Kim et al., 2019). For the music captioning task, we evaluate on the MusicCaps dataset (Agostinelli et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.375, + 0.888, + 0.406 + ], + "angle": 0, + "content": "Metrics Similarly, we use WER as the evaluation metric for the ASR task. For audio and music captioning, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.412, + 0.821, + 0.429 + ], + "angle": 0, + "content": "Inference Setting For inference, we directly use the top-k sampling strategy and set \\( k = 30 \\) for all experiments." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.444, + 0.325, + 0.459 + ], + "angle": 0, + "content": "D.4. 
LM-based Audio Generation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.468, + 0.888, + 0.513 + ], + "angle": 0, + "content": "We also perform audio generation tasks, including text-to-speech, text-to-sound, and text-to-music generation. Similarly, we construct the sequence as [text token, audio token], then the model is asked to predict the audio token based on the previous text token." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.521, + 0.724, + 0.537 + ], + "angle": 0, + "content": "Training and Test Data We use the same training and test data as the audio comprehension task." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.543, + 0.888, + 0.589 + ], + "angle": 0, + "content": "Metrics For TTS evaluation, we use WER to evaluate robustness, and UTMOS and DNSMOS are used to assess speech quality. For text-to-sound and text-to-music, we follow previous works AudioGen (Kreuk et al., 2022), using Fréchet Audio Distance (FAD), Kullback-Leibler (KL) Divergence, and Fréchet Distance (FD) for audio fidelity and similarity." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.596, + 0.854, + 0.612 + ], + "angle": 0, + "content": "Inference Setting During the inference stage, we use the top-k sampling strategy and set \\( k = 30 \\) for all experiments." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.628, + 0.283, + 0.643 + ], + "angle": 0, + "content": "D.5. Subjective Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.652, + 0.888, + 0.757 + ], + "angle": 0, + "content": "For the subjective evaluations, we adopt the approach used in previous works (Kumar et al., 2023; Parker et al., 2024) and use the MUSHRA format without a hidden anchor. Listeners are asked to compare multiple versions of an example simultaneously, including both a labeled reference and a hidden reference. They are given the following instructions: \"Please assess the quality similarity between an audio sample and its reference. Listen carefully to the reference audio, then rate the quality of each test clip in comparison. A score of 0 indicates no resemblance to the reference, while a score of 100 means it is identical to the reference.\" We randomly select 10 samples from each category (speech, music, and sound) in the test set, ensuring that each sample receives 10 ratings." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.776, + 0.334, + 0.792 + ], + "angle": 0, + "content": "E. Audio Tokenizer Baselines" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.803, + 0.888, + 0.833 + ], + "angle": 0, + "content": "To make a fair comparison, we classify the audio tokenizers into two types: (1) speech-based tokenizers, which are trained on speech datasets, and (2) audio-based tokenizers, which are trained on speech, sound, and music datasets." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.849, + 0.245, + 0.864 + ], + "angle": 0, + "content": "E.1. Speech Tokenizer" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.873, + 0.318, + 0.889 + ], + "angle": 0, + "content": "For speech data, we compare with:" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.087, + 0.094, + 0.838, + 0.107 + ], + "angle": 0, + "content": "Table 12. 
The performance comparison on LibriTTS test clean. Bold for the best result and underline for the second-best result." + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.108, + 0.88, + 0.226 + ], + "angle": 0, + "content": "
Models | FPS/TPS | CS/BR | Reconstruction: UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | Efficiency: Model size (M) (↓) | RTF (↓)
Encodec | 50/400 | 1024/6kbps | 3.30 | 3.76 | 3.95 | 0.94 | 2.72 | 14 | 0.019
Encodec | 50/150 | 1024/1.5kbps | 2.02 | 3.27 | 3.83 | 0.88 | 1.79 | 14 | 0.019
DAC | 50/150 | 1024/1.5kbps | 2.61 | 3.36 | 3.85 | 0.89 | 1.96 | 71 | 0.026
Wavtokenizer | 40/40 | 4096/0.48kbps | 3.65 | 3.61 | 3.80 | 0.87 | 1.81 | 77 | 0.017
StableCodec | 25/25 | 46656/0.4kbps | 4.20 | 3.74 | 3.51 | 0.88 | 1.85 | 950 | 0.039
MimiCodec (3 RVQ) | 12.5/37.5 | 2048/0.41kbps | 2.82 | 3.28 | 3.34 | 0.83 | 1.40 | 75.6 | 0.023
ALMTokensizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.68 | 3.64 | 3.90 | 0.90 | 1.92 | 174 | 0.031
" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.267, + 0.886, + 0.297 + ], + "angle": 0, + "content": "(1) Encodec (Defossez et al., 2022), a SOTA audio codec model trained on large-scale speech, sound, and music datasets. The official open-sourced \\(24\\mathrm{kHz}\\) version is used." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.305, + 0.888, + 0.336 + ], + "angle": 0, + "content": "(2) DAC-Codec (Kumar et al., 2023), which offers very high reconstruction performance. It is trained on large-scale speech, sound, and music datasets. The official open-sourced \\(24\\mathrm{kHz}\\) version is used." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.343, + 0.885, + 0.373 + ], + "angle": 0, + "content": "(3) MimiCodec (Défossez et al., 2024), a SOTA low-bitrate speech codec model trained on a large-scale speech dataset. The sampling rate is \\(24\\mathrm{kHz}\\)." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.381, + 0.885, + 0.412 + ], + "angle": 0, + "content": "(4) SpeechTokenizer (Zhang et al., 2023), a semantic-rich speech codec model trained on a large-scale speech dataset. The sampling rate is \\(16\\mathrm{kHz}\\)." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.419, + 0.885, + 0.449 + ], + "angle": 0, + "content": "(5) WavTokenizer (Ji et al., 2024), an audio codec tokenizer trained on large-scale speech, sound, and music datasets. The sampling rate is \\(24\\mathrm{kHz}\\)." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.267, + 0.888, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.456, + 0.885, + 0.486 + ], + "angle": 0, + "content": "To make a fair comparison, for Encodec, DAC-Codec, and SpeechTokenizer, we use the first three RVQ layers to control the bitrate during inference." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.503, + 0.238, + 0.517 + ], + "angle": 0, + "content": "E.2. Audio Tokenizer" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.526, + 0.886, + 0.557 + ], + "angle": 0, + "content": "For sound and music data, we compare with Encodec, DAC-Codec, and WavTokenizer. These three models are trained on large-scale speech, sound, and music datasets." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.573, + 0.242, + 0.587 + ], + "angle": 0, + "content": "E.3. Semantic Models" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.597, + 0.888, + 0.643 + ], + "angle": 0, + "content": "Furthermore, to evaluate the performance of semantic information, we also introduce several SSL-based models. For speech, we use WavLM (Chen et al., 2022a) and HuBERT (Hsu et al., 2021). For sound and music, we use BEATs (Chen et al., 2022b) and Wav2Vec2-AudioSet \\(^{6}\\)." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.662, + 0.49, + 0.679 + ], + "angle": 0, + "content": "F. More audio tokenizer evaluation experiments" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.688, + 0.434, + 0.703 + ], + "angle": 0, + "content": "F.1. The subjective evaluation for audio tokenizer" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.711, + 0.525, + 0.726 + ], + "angle": 0, + "content": "Table 7 shows the subjective evaluation results for audio tokenizer." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.743, + 0.408, + 0.757 + ], + "angle": 0, + "content": "F.2. 
Evaluation results on LibriTTS test clean" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.767, + 0.886, + 0.811 + ], + "angle": 0, + "content": "We report the reconstruction performance evaluated on a subset of the LibriTTS test clean set, where we randomly select 400 speech utterances. Additionally, we calculate the Real-Time Factor (RTF) and model size to assess efficiency. For RTF evaluation, we use an NVIDIA A100 GPU to evaluate all models." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.829, + 0.273, + 0.843 + ], + "angle": 0, + "content": "F.3. Length generalization" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.852, + 0.886, + 0.882 + ], + "angle": 0, + "content": "StableCodec (Parker et al., 2024) highlights that the introduction of transformer-based architectures can lead to the length generalization problem. For instance, the training data of ALMTokenizer consists of 5-second segments, whereas the test" + }, + { + "type": "page_footnote", + "bbox": [ + 0.106, + 0.891, + 0.436, + 0.906 + ], + "angle": 0, + "content": "6https://huggingface.co/ALM/wav2vec2-large-audioset" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.763, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.094, + 0.887, + 0.121 + ], + "angle": 0, + "content": "Table 13. Objective metrics for the ALMTokenizer and baselines, evaluated on utterances from length 4s to 10s, showing generalization of models across lengths" + }, + { + "type": "table", + "bbox": [ + 0.151, + 0.121, + 0.825, + 0.368 + ], + "angle": 0, + "content": "
Model | FPS | TPS | Bitrate | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | DNSMOS (↑)
4 seconds
Encodec | 50 | 150 | 1.5kbps | 1.97 | 2.64 | 3.62 | 0.80 | 3.26
DAC | 50 | 150 | 1.5kbps | 2.1 | 3.17 | 3.65 | 0.81 | 3.26
Ours | 12.5 | 37.5 | 0.41kbps | 1.84 | 3.63 | 3.69 | 0.79 | 3.41
6 seconds
Encodec | 50 | 150 | 1.5kbps | 1.97 | 2.54 | 3.63 | 0.81 | 3.26
DAC | 50 | 150 | 1.5kbps | 2.0 | 3.11 | 3.65 | 0.81 | 3.28
Ours | 12.5 | 37.5 | 0.41kbps | 1.89 | 3.66 | 3.75 | 0.81 | 3.62
8 seconds
Encodec | 50 | 150 | 1.5kbps | 1.96 | 2.52 | 3.63 | 0.81 | 3.34
DAC | 50 | 150 | 1.5kbps | 2.1 | 3.18 | 3.66 | 0.81 | 3.28
Ours | 12.5 | 37.5 | 0.41kbps | 1.95 | 3.55 | 3.74 | 0.81 | 3.66
10 seconds
Encodec | 50 | 150 | 1.5kbps | 1.95 | 2.53 | 3.65 | 0.81 | 3.32
DAC | 50 | 150 | 1.5kbps | 2.1 | 2.19 | 3.67 | 0.81 | 3.25
Ours | 12.5 | 37.5 | 0.41kbps | 1.96 | 3.54 | 3.73 | 0.81 | 3.66
" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.397, + 0.888, + 0.475 + ], + "angle": 0, + "content": "data comprises segments of varying durations. We evaluate the model across four distinct length levels: 4, 6, 8, and 10 seconds. Encodec and DAC are selected as baselines due to their reliance on convolutional layers, which demonstrate robustness to variable input lengths. As shown in Table 13, the evaluation results indicate that ALMTokensizer effectively handles inference across these diverse lengths. These findings suggest that ALMTokensizer exhibits strong generalization capabilities with respect to input length variation." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.489, + 0.461, + 0.505 + ], + "angle": 0, + "content": "F.4. Compared to diffusion-based audio codec models" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.513, + 0.889, + 0.725 + ], + "angle": 0, + "content": "We compare ALMTokens with an alternative family of audio tokenizers that leverage discrete semantic tokens derived from self-supervised pre-trained (SSL) models (e.g., Hubert (Hsu et al., 2021), WavLM (Chen et al., 2022a), AudioMAE (Huang et al., 2022)). These models first quantize the SSL features into semantic tokens and subsequently use a generative model to resynthesize the waveform. Diffusion (Ho et al., 2020) and Flow-Matching (Lipman et al., 2022) are two popular generative models. Previous works, such as GLM4-Voice tokenizer (Zeng et al., 2024) and SemantiCodec (Liu et al., 2024), have demonstrated success using diffusion-based decoders. However, such strategies tend to result in significant information loss. For instance, the semantic tokens in GLM4-Voice lack timbre information and require additional prompts to control timbre during decoding. Notably, the open-sourced GLM4-Voice tokenizer uses a fixed timbre, meaning that any speech encoded by GLM4-Voice will lose its original timbre. To address this information loss in semantic tokens, SemantiCodec introduces acoustic streaming to enhance waveform reconstruction. A key concern, however, is that both SemantiCodec and GLM4-Voice tokenizers demand significantly more computational resources during the inference stage. In the following, we present a comprehensive comparison between ALMTokens and SemantiCodec, focusing on the following aspects: (1) reconstruction performance for speech, sound, and music; (2) semantic information performance for speech, sound, and music; and (3) computational resource requirements during inference, measured using RTF." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.731, + 0.888, + 0.809 + ], + "angle": 0, + "content": "Table 14 shows the speech reconstruction and semantic performance, where we observe that ALMTokenizer outperforms the alternatives in both aspects while using less bitrate. Table 15 presents experimental results for sound and music data, where ALMTokenizer again demonstrates superior performance across all metrics compared to SemantiCodec. In Table 16, we present the model size and RTF metrics, showing that ALMTokenizer has fewer model parameters and significantly surpasses SemantiCodec in inference speed (0.031 vs 0.92)." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.826, + 0.555, + 0.845 + ], + "angle": 0, + "content": "G. The details of ALMTokenizer structure and training" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.852, + 0.238, + 0.867 + ], + "angle": 0, + "content": "G.1. 
Model structure" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.876, + 0.886, + 0.907 + ], + "angle": 0, + "content": "Table 17 gives the details of ALMTokensizer configuration, which results in 174M parameters. In all of experiments, for the MAE-transformer encoded and decoder, we adopt a 8 layer transformer layers." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.764, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.18, + 0.094, + 0.794, + 0.108 + ], + "angle": 0, + "content": "Table 14. The performance comparison between ALMTokensizer and SemanticCodec on VCTK dataset." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.108, + 0.88, + 0.173 + ], + "angle": 0, + "content": "
Models | FPS/TPS | CS/BR | Reconstruction: UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | Semantic: ASR (↓) | EC (↑)
SemantiCodec | 50/50 | 16384/0.68kbps | 3.2 | 3.57 | 3.90 | 0.81 | 1.76 | 48.3 | 17.8
ALMTokensizer | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.195, + 0.887, + 0.21 + ], + "angle": 0, + "content": "Table 15. The performance comparison between ALMTokensizer and SemanticCodec on Music (MusicCaps) and sound data (AudioCaps)." + }, + { + "type": "table", + "bbox": [ + 0.132, + 0.211, + 0.842, + 0.338 + ], + "angle": 0, + "content": "
Models | FPS/TPS | CS/BR | Reconstruction: Mel loss (↓) | STFT loss (↓) | VISQOL (↑) | Semantic: Classification (↑)
Sound data
SemantiCodec | 50/50 | 16384/0.68kbps | 18.45 | 1.40 | 2.47 | 38.8%
ALMTokensizer | 12.5/37.5 | 2048/0.41kbps | 15.0 | 1.24 | 2.99 | 44%
Music data
SemantiCodec | 50/50 | 16384/0.68kbps | 47.9 | 1.58 | 2.49 | 48%
ALMTokensizer | 12.5/37.5 | 2048/0.41kbps | 34.4 | 1.32 | 3.96 | 59%
" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.36, + 0.888, + 0.481 + ], + "angle": 0, + "content": "Patchify and UnPatchify modules A single-channel audio signal \\( \\pmb{x} \\in \\mathcal{R}^{1 \\times N} \\) (where \\( N \\) denotes the sampling points) is processed through the Encodec-style Patchify and UnPatchify modules, which adopt the same structure as Encodec (Défossez et al., 2022), consisting of four convolutional blocks. Each convolutional block consists of a residual unit followed by a down-sampling layer. These convolution blocks effectively encode the audio signal \\( \\pmb{x} \\) into an audio frame representation \\( e \\in \\mathcal{R}^{T \\times d} \\), where \\( T \\) denotes the number of frames and \\( d \\) denotes the dimension of each vector. The convolution blocks are followed by a two-layer LSTM for sequence modeling, followed by a final 1D convolutional layer with a kernel size of 7 and \\( D \\) output channels. The UnPatchify module mirrors the Patchify architecture by substituting stride convolutions with transposed convolutions and reversing the stride order." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.489, + 0.888, + 0.55 + ], + "angle": 0, + "content": "For the StableCodec-style Patchify and UnPatchify modules, we follow the approach in StableCodec (Parker et al., 2024) and use a reshape operation to transform \\( \\boldsymbol{x} \\in \\mathcal{R}^{t \\times sr} \\) into \\( e \\in \\mathcal{R}^{T \\times d} \\), where \\( T = N / 320 \\) and \\( d = 320 \\). We then apply a linear layer to map the dimension to \\( D \\). Finally, we add four transformer layers for sequence modeling. Similarly, the UnPatchify module mirrors the Patchify architecture." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.557, + 0.888, + 0.618 + ], + "angle": 0, + "content": "Discriminators For the discriminators, we follow prior work (Défossez et al., 2022), which combines mel-spectrogram and log-mel-spectrogram features and inputs them into a network consisting of several convolutional layers. Specifically, we use six discriminators with different configurations: the hidden dimensions are set as 64, 128, 256, 512, 512, 512, and the hop lengths are set as 32, 64, 128, 256, 512, 1024." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.634, + 0.538, + 0.648 + ], + "angle": 0, + "content": "G.2. Reconstruction loss and adversarial loss for ALMTokenizer" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.658, + 0.888, + 0.735 + ], + "angle": 0, + "content": "Let the reconstructed signal be \\(\\hat{\\pmb{x}}\\). For the reconstruction loss, we design it from two perspectives: the time domain and the frequency domain. We first compute the \\(L_{1}\\) loss between \\(\\pmb{x}\\) and \\(\\hat{\\pmb{x}}\\) in the time domain. Next, we compute the \\(L_{1}\\) loss between the STFT spectrogram of \\(\\pmb{x}\\) and \\(\\hat{\\pmb{x}}\\) in the frequency domain. Following (Wang et al., 2024b), we employ a sub-band split strategy to divide the spectrogram into several parts. 
The adversarial loss is employed to enhance the perceptual quality of the generated audio:" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.741, + 0.887, + 0.782 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {d} = \\frac {1}{K} \\sum_ {i = 1} ^ {K} \\max (0, 1 - D _ {k} (\\boldsymbol {x})) + \\max (0, 1 + D _ {k} (\\hat {\\boldsymbol {x}})) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.79, + 0.888, + 0.82 + ], + "angle": 0, + "content": "where \\(K\\) denotes the number of discriminators. During the training stage, the adversarial loss for the generator is computed as a hinge loss over the logits of these discriminators:" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.828, + 0.887, + 0.869 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {a d v} = \\frac {1}{K} \\sum_ {i = 1} ^ {K} \\max (0, 1 - D _ {k} (\\hat {\\boldsymbol {x}})) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.876, + 0.888, + 0.907 + ], + "angle": 0, + "content": "The feature loss \\(\\mathcal{L}_{feat}\\) is computed by taking the average absolute difference between the discriminator's internal layer outputs for the generated audio and those for the corresponding real audio." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.497, + 0.935 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.057, + 0.765, + 0.073 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.094, + 0.759, + 0.108 + ], + "angle": 0, + "content": "Table 16. The model size and RTF comparison between SemantiCodec and ALMTokensizer." + }, + { + "type": "table", + "bbox": [ + 0.31, + 0.108, + 0.663, + 0.161 + ], + "angle": 0, + "content": "
Model | Model size (M) (↓) | RTF (↓)
SemantiCodec | 507 | 0.92
ALMTokenizer (Ours) | 174 | 0.031
" + }, + { + "type": "table", + "bbox": [ + 0.258, + 0.172, + 0.716, + 0.371 + ], + "angle": 0, + "content": "
ALMTokenizer
Input shape | (B, 1, N)
Patchify module (output) | (B, T, d), T=N/320
Token Interleaving and Retrieval | w ∈ [2, 3, 4, 5, 6, 7, 8, 9, 10]
Dimension of transformer encoder | 256
The number of transformer encoder | 24
Dimension of transformer decoder | 512
The number of transformer decoder | 24
Codebook size | 2048
VQ layers | 3
Number of Transformer heads | 64
UnPatchify module (output) | (B, 1, N)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.315, + 0.387, + 0.658, + 0.402 + ], + "angle": 0, + "content": "Table 17. ALMTokenizer model backbone configurations" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.424, + 0.236, + 0.439 + ], + "angle": 0, + "content": "G.3. Training details" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.448, + 0.889, + 0.493 + ], + "angle": 0, + "content": "The AdamW optimizer is used in the training. We set the learn rate as \\( 1e - 4 \\). We train the model with 200k steps. The final loss as following shows. We set \\( \\lambda_{1} = 0.5 \\) and \\( \\lambda_{2} = 0.1 \\) during our experiments. We conduct all of the experiments with 4 NVIDIA A100-80G GPUs." + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.503, + 0.887, + 0.521 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathbf {L} _ {\\text {a d v}} + \\mathbf {L} _ {\\text {f e a t}} + \\mathbf {L} _ {\\text {r e c}} + \\lambda_ {1} \\mathbf {L} _ {\\text {M A E}} + \\lambda_ {2} \\mathbf {L} _ {\\text {A R}} \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.538, + 0.338, + 0.556 + ], + "angle": 0, + "content": "H. Reproducibility Statement" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.564, + 0.889, + 0.61 + ], + "angle": 0, + "content": "To enhance reproducibility, we provide the pseudocode of ALMTokensizer. In the future, we plan to improve both the model structure and training data to obtain more robust models, especially for music and sound, and release the code for the research community." + }, + { + "type": "code_caption", + "bbox": [ + 0.362, + 0.628, + 0.611, + 0.642 + ], + "angle": 0, + "content": "Listing 1. Pseudocode of ALMTokenizer" + }, + { + "type": "code", + "bbox": [ + 0.088, + 0.643, + 0.887, + 0.909 + ], + "angle": 0, + "content": "class ALMTokensizer: def __init__(self, transformerEncoder_args, transformerDecoder_args, maeDecoder_args, depth_gpt_args, patchify_args, encoder_embedding_dim, decoder_embedding_dim, semantic_prior_path, mask_rate, window_sizes = [2,3,4,5,6,7,8,9,10],): self(window_sizes = window_sizes self.transformerEncoder = Transformer(transformerEncoder_args) self.transformerDecoder = Transformer(transformerDecoder_args) self.maedecoder = Transformer(maedecoder_args) self.Patchify = EncodeEncoder(patchify_args) self.UnPatchify = EncodeDecoder(patchify_args)" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.936 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.765, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "code", + "bbox": [ + 0.125, + 0.086, + 0.885, + 0.906 + ], + "angle": 0, + "content": "```c\nself.cls_token = nnParameter(torch.zeros(1, 1, encoder_embedding_dim))\nselfmasked_token = nnParameter(torch.zeros(1, 1, decoder_embedding_dim))\ncheckpoint = torch.load(semantic_prior_path, map_location=\"cpu\")\nself.vq = RVQ_semantic(\n input_dim=encoder_embedding_dim,\n semantic_prior = checkpoint,\n layers = 3)\nself.depth_gpt = GPT Decoder(depth_gpt_args)\nself.temp_window_size = 6\nself_mask_rate = mask_rate\ndef Encoder_token_Interleaving(self, x):\n B, T, D = x.shape # batch, length, dim\ncls_tokens = self.cls_tokenrepeat(B, (T//self.tmp_window_size), 1).unsqueeze(2)\n new_T = T + (T // self.tmp_window_size)\nx reshaped = x reshape(B, T // self.tmp_window_size, self.tmp_window_size, D)\nx_withCls = torch.cat([x reshaped, 
cls_tokens], dim=2)\nnew_x = x_withCls.reshape(B, -1, D)\nreturn new_x\ndef Encoder_token_Retrieval(self, x):\n B, new_T, D = x.shape\noriginal_T = new_T - new_T // (self.tmp_window_size + 1)\nmask Indices = [(i + 1) * (self.tmp_window_size + 1) - 1 for i in range(original_T // self.tmp_window_size)]\ncls_tokens = new_x[;, mask Indices, :]\nreturnCLS_tokens\ndef Decoder_token_Interleaving(self, en_token):\n B, T, D = en_token.shape\nx = self-mask_tokenrepeat(B, 1, 1)\nnew_T = en_token.shape[1] * self.tmp_window_size + en_token.shape[1]\nx = x.repeata(1, en_token.shape[1] * self.tmp_window_size, 1)\nx = x.reshape(B, -1, self.tmp_window_size, D)\nx_with Masks = torch.cat([x, en_token.unsqueeze(2)], dim=2)\nnew_x = x_with Masksreshape(B, -1, D)\nreturn new_x\ndef Decoder_token_Retrieval(self, new_x):\n B, new_T, D = new_x.shape\nnum_masks = new_T // (self.interval + 1)\noriginal_T = new_T - num_masks\nmaskIndices = [(i + 1) * (self.interval + 1) - 1 for i in range(num_masks)]\nallIndices = list(range(new_T))\nmaskIndices = [i for i in allIndices if i not in maskIndices]\nmask Frames = new_x[;, maskIndices,:]\nreturn mask Frames\ndef forward(self, x):\n x_len = x.shape[-1]\nself.temp_window_size = choice(selfwindow_sizes)\nemb Frames = self.Patchify(x)\nif self.trainin:\n emb Frames_mask = self.apply_mask(emb Frames, mask_rate = self-mask_rate)\ninterleaving Frames = self.Encoder_token_Interleaving(emb Frames_mask)\npredictDSP = self.maedecoder(interleavingFrames)\nmae_loss = L1_loss(predictDSP, emb Frames)\nlatent_tokens = self.transformer Encoder(interleavingFrames)\nquery_token = self.Encoder_token_Retrieval(latent_tokens)\nQuantized_token, codes, allquantized = self.vq(query_token)\ncat_quantized = []\nfor q_emb in all_quantized:" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.209, + 0.058, + 0.763, + 0.071 + ], + "angle": 0, + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + }, + { + "type": "code", + "bbox": [ + 0.156, + 0.086, + 0.791, + 0.201 + ], + "angle": 0, + "content": "q_emb = q_emb.reshape(-1, q_emb.shape[-1]).unsqueeze(1) \ncat_quantized.append(q_emb) \ncat_quantized = torch.cat(cat_quantized, dim=1) \ngpt_loss = self.depth_gpt.compute_prior_loss(cat_quantized) \nde_interleaving Frames = self.Decoder_token_Interleaving(Quantized_token) \ndelatent_token = self.transformer Decoder(de_interleaving Frames) \nmask_tokens = self.Decoder_token_Retestval(de_forensic_token) \nx_ = self.UnPatchify mask_tokens) \nreturn x_, mae_loss, gpt_loss" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.228, + 0.199, + 0.243 + ], + "angle": 0, + "content": "I. Limitation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.254, + 0.89, + 0.421 + ], + "angle": 0, + "content": "In this study, we present ALMTokenizer, a low-bitrate, semantic-rich audio codec tokenizer. We demonstrate that ALM-Tokenizer excels in both reconstruction and semantic information retention under low-bitrate conditions. However, we acknowledge that there is still significant room for improvement in reconstruction performance, particularly for sound and music data. Building an audio tokenizer for sound and music in the low-bitrate setting poses additional challenges. In terms of semantic information, ALMTokenizer still lags behind traditional SSL models. 
Although we propose several training losses to enhance semantic information in the codec model, the improvements are limited and, in some cases, negatively impact reconstruction quality. We recognize the need for a careful design and balance of these semantic loss terms. Additionally, the multi-stage training strategy increases training complexity. These training strategy brings waste. Most of the components are eventually discarded, e.g. MAE-transformer encoder/decoder, MAE-decoder, and depth AR-transformer. These components would have made sense to still utilize them for some purpose, e.g. the AR decoder could have been used to initialize the depth transformer in the Language modeling task. These concerns are left for future work." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "23" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_origin.pdf b/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..44f29c80944c24b0350765d3888aba490cc754e4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/cbca1f99-6d1c-4e0d-b299-960633d34c1c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af692b2029cd8639a561a1a003773eddcb7388091496852af337a7f5e7efa8fa +size 2660511 diff --git a/data/2025/2504_10xxx/2504.10344/full.md b/data/2025/2504_10xxx/2504.10344/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1130c44e2cdd6c23a1b32b56bae435bb9c921fed --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/full.md @@ -0,0 +1,610 @@ +# ALMTokenizer: A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling + +Dongchao Yang $^{1}$ Songxiang Liu $^{2}$ Haohan Guo $^{1}$ Jiankun Zhao $^{1}$ Yuanyuan Wang $^{1}$ Helin Wang $^{2}$ Zeqian Ju $^{2}$ Xubo Liu $^{2}$ Xueyuan Chen $^{1}$ Xu Tan $^{2}$ Xixin Wu $^{1}$ Helen Meng $^{1}$ + +# Abstract + +Recent advancements in audio language models have underscored the pivotal role of audio tokenization, which converts audio signals into discrete tokens, thereby facilitating the application of language model architectures to the audio domain. In this study, we introduce ALMTokenizer, a novel low-bitrate and semantically rich audio codec tokenizer for audio language models. Prior methods, such as Encodec, typically encode individual audio frames into discrete tokens without considering the use of context information across frames. Unlike these methods, we introduce a novel query-based compression strategy to capture holistic information with a set of learnable query tokens by explicitly modeling the context information across frames. This design not only enables the codec model to capture more semantic information but also encodes the audio signal with fewer token sequences. Additionally, to enhance the semantic information in audio codec models, we introduce the following: (1) A masked autoencoder (MAE) loss, (2) Vector quantization based on semantic priors, and (3) An autoregressive (AR) prediction loss. As a result, ALMTokenizer achieves competitive reconstruction performance relative to state-of-the-art approaches while operating at a lower bitrate. Within the same audio language model framework, ALMTokenizer outperforms previous tokenizers in audio understanding and generation tasks. $^{1}$ + +# 1. 
Introduction + +The field of generative modeling has witnessed remarkable progress, largely driven by the success of autoregressive + +*Equal contribution 1The Chinese University of Hong Kong, Hong Kong, China 2Independent Authors. Correspondence to: Dongchao Yang . + +(AR) models in the development of large language models (LLMs) (OpenAI, 2023). Inspired by the success of LLMs in the fields of natural language processing (NLP), recent works have begun to employ AR transformers for audio generation (Borsos et al., 2023a; Agostinelli et al., 2023; Yang et al., 2023c), such as using the AR transformer paradigm to solve text-to-speech task (Wang et al., 2023), or expanding the text LLM into multimodal LLM by integrating the audio modality into the original LLM (Défossez et al., 2024). Audio tokenizer plays an important role in all of these models, which converts audio signals into discrete token sequence for AR audio language modeling. + +In the literature, audio codec models, such as SoundStream (Zeghidour et al., 2021) and Encodec (Défossez et al., 2022), have been widely adopted as audio tokenizers for audio language models. These generative models aim to represent audio data in a quantized discrete latent space, where the codec's decoder is then used to reconstruct the audio signals from the generated discrete token sequences. Recently, there has been significant interest in the audio community regarding audio codec tokenizers, leading to the proposal of several novel models (Kumar et al., 2023; Ji et al., 2024; Défossez et al., 2024; Parker et al., 2024; Zhang et al., 2023). Despite the advancements in audio codec models, an important research question remains unanswered: which type of audio codec is most suitable for audio language modeling? Inspired by previous works (Borsos et al., 2023a; Parker et al., 2024; Ji et al., 2024; Défossez et al., 2024), these studies investigate two key properties of audio codec models: low bitrate and semantic richness. We first conduct a set of evaluation experiments to explore the influence of bitrate and semantic information on audio language modeling. Specifically, we train three audio codec models with varying bitrates, while keeping the number of vector quantization (VQ) layers constant and adjusting the frame rates to $50\mathrm{Hz}$ , $25\mathrm{Hz}$ , and $12.5\mathrm{Hz}$ . We then train the audio language model using different audio tokenizers on the same dataset. To assess the impact of semantic information, we also train a $12.5\mathrm{Hz}$ semantic tokenizer and incorporate it into the audio language model. Further details can be found in Appendix B. Figure 1 presents the results, which show that: (1) low-bitrate audio codec models significantly en + +hance training and inference efficiency; and (2) semantic information is more easily modeled by LM-based generative methods, e.g. lower PPL and loss. The experimental findings demonstrate the importance of constructing a low-bitrate and semantic-rich audio codec tokenizer for audio language modeling. Based on these results, we propose a novel audio codec tokenizer that offers the following advantages: (1) Low-bitrate: it compresses the audio data into fewer tokens; (2) Semantic-rich: it incorporates abundant semantic information; (3) AR-driven latent space: it optimizes the latent space for autoregressive (AR) modeling. 
+ +To achieve this objective, we propose the following novel techniques: (1) We introduce a novel query-based compression strategy, which uses a set of learnable query tokens to capture holistic information by explicitly modeling the context information across audio frames with transformer layers. This strategy effectively takes advantage of the strong modeling capabilities of transformers to achieve better compression and semantic modeling. It also enables dynamic control over the compression rate by adjusting the number of query tokens. (2) To enhance semantic richness in the codec model, we introduce a Masked Autoencoder (MAE) loss, which encourages the model to capture more global information. (3) Inspired by previous works (Zhu et al., 2024), we propose the integration of semantic priors into the VQ layer. Specifically, we perform k-means clustering on the pre-trained wav2vec2 (Baevski et al., 2020) and BEATs (Chen et al., 2022b) encoder outputs, using the cluster centers to initialize the VQ layer. (4) We observe that AR models struggle to fit the distribution of the residuals in the VQ layers, with token prediction accuracy being notably lower in the second and third VQ layers compared to the first. To address this issue, we introduce an AR prediction loss to optimize the latent space. + +To evaluate the effectiveness of the ALMTokenizer, we first compare its reconstruction and semantic performance with previous state-of-the-art models. Using the same audio language model framework, we then demonstrate that ALMTokenizer achieves superior performance in LM-based audio understanding and generation tasks, including text-to-speech (TTS), speech-to-text (ASR), audio captioning, text-to-sound, text-to-music, and music captioning. + +# 2. Related Works + +# 2.1. Audio Language Models + +Recently, there has been a growing interest in bridging audio and text through multimodal learning approaches. Models such as AudioLM (Borsos et al., 2023a) leverage AR transformers and hierarchical modeling techniques to process audio data directly, learning representations that capture both linguistic and acoustic features. Inspired by AudioLM, VALL-E (Wang et al., 2023) and SPEAR-TTS (Kharitonov + +![](images/1f2afdd26c38ad12bec4637ffdc1de7b03af211f664c49636beea44b22135499.jpg) +Figure 1. The performance comparison when different types of tokenizer is used for audio modeling. PPL refers to perplexity. + +et al., 2023) formulate the text-to-speech task as an audio language modeling problem: generating an audio token sequence with the help of an autoregressive transformer. MusicLM (Agostinelli et al., 2023) and MusicGen (Copet et al., 2023) frame the text-to-music task as an audio language modeling problem. UniSep (Wang et al., 2025) explores using audio LM to solve audio separation tasks with the help of audio tokenizer. Moshi (Défossez et al., 2024), SpiRitLM (Nguyen et al., 2025), and GLM4-Voice (Zeng et al., 2024) explore speech-to-speech conversation. Furthermore, audio tokenizers can also be combined with discrete diffusion models (Yang et al., 2023d;a; Borsos et al., 2023b; Ju et al., 2024). In all of these models, the audio tokenizer plays a crucial role by transforming audio data into a discrete latent sequence, reducing computational demands compared to directly processing the audio signal, and enhancing the effectiveness and efficiency of the generation process. + +# 2.2. Audio Tokenizer + +In the literature, both semantic and acoustic tokenizers are widely employed in audio language models. 
The semantic tokenizer is trained using pre-trained self-supervised learning (SSL) models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a). Applying k-means or vector quantization in these models generates semantic tokens (Zeng et al., 2024; Du et al., 2024; Liu et al., 2024). Previous works (Borsos et al., 2023a) demonstrate that semantic tokens are more easily modeled by language models. However, due to the loss of significant acoustic information in semantic tokens, they rely on an additional decoder to generate high-fidelity waveform, such as a diffusion model (Ho et al., 2020) or flow-matching (Lipman et al., 2022). Inevitably, this additional module results in increased infer + +![](images/3c618b74f688548add74bec89c6981564b20b5fb43a06e3d3e40ad5bc0570a5f.jpg) +Figure 2. The left part illustrates the framework of the previous audio codec, while the right part provides an overview of the proposed ALMTokensizer. $w$ denotes the window size. The details of ALMTokensizer can be found in Section 3.2. + +![](images/929236d9a847bdafe944104601209dbf98eadb1d765b6edf85d4aa3a2145a540.jpg) + +ence complexity and poorer reconstruction. + +Acoustic tokenizer refers to audio codec models, trained for acoustic-level reconstruction tasks. Audio codec (Zeghidour et al., 2021; Defossez et al., 2022; Yang et al., 2023b; Kumar et al., 2023) have demonstrated exceptional performance in reconstructing high-quality audio. In general, these codec models consist of an encoder, a quantizer, and a decoder. Both the encoder and decoder are lightweight, resulting in minimal inference costs. Compared to semantic tokens, codec models can support audio, speech, and music domains, and their rich acoustic details mitigate the need for cascading architectures in downstream generative models. Recently, an increasing number of audio codec models have been proposed, focusing on (1) Better reconstruction quality, such as DAC (Kumar et al., 2023), Vocos (Siuzdak, 2023), SQ-Codec (Yang et al., 2024c;b) and APCodec (Ai et al., 2024); (2) Low-bitrate models, such as HiFiCodec (Yang et al., 2023b), wavtokenizer (Ji et al., 2024), StableCodec (Parker et al., 2024), and TS3-Coded (Wu et al., 2024); (3) Task-driven codec, designed for text-to-speech tasks, such as FACodec (Ju et al., 2024), SpeechTokenizer (Zhang et al., 2023), Single-Coded (Li et al., 2024), audio retrieval-based Tokenizers (Banerjee & Arora, 2022; van Niekerk et al., 2024). In this study, we focus on developing a low-bitrate, semantically rich audio codec tokenizer. The most closely related work to ours is MimiCodec (Defossez et al., 2024), which provides high-quality semantic information while achieving a low bitrate (1.1 kbps). However, MimiCodec relies on knowledge distillation from WavLM (Chen et al., 2022a) to the first VQ layer, whereas the remaining VQ layers do not incorporate semantic information. Furthermore, it is specifically designed for speech tasks and has not been validated for non-speech tasks, such as sound and music generation. In contrast to MimiCodec, our ALMTokens encode more semantic information across all VQ layers, achieves a lower bitrate, and is designed for both speech and + +general sound. + +# 3. Proposed Method + +This section introduces the technical details of the proposed ALMTokensizer. Section 3.1 presents the framework of previous audio codec models. Section 3.2 presents the details of proposed audio codec framework. In Sections 3.3 and 3.4, we present the training loss and training strategies. + +# 3.1. 
Preliminary + +Previous audio codec (Défossez et al., 2022; Zeghidour et al., 2021) typically adopt an encoder-quantizer-decoder framework, as shown in the left part of Figure 2. The audio is encoded into several audio frames by the encoder. Then, residual vector quantization (RVQ) (Zeghidour et al., 2021) is used to quantize these audio frames. Lastly, the decoder is used to recover the waveform from the quantized audio frames. It can be observed that previous works treat each audio frame equally and rely on these quantized frames to recover the audio. However, such a strategy (1) ignores the fact that different audio frames encode different levels of information, which results in some audio frames being difficult to recover in low-bitrate settings (e.g., encoding the audio frames at $12.5\mathrm{Hz}$ ); (2) fails to utilize the context information between different frames. + +# 3.2. Query-based Audio Compression + +To construct a low-bitrate, semantically rich audio codec model, we propose a query-based compression strategy. Our approach is inspired by the success of MAE (He et al., 2022), which applies a masking operation to the original image with a high mask rate (75%). With the help of a transformer encoder and decoder, it is possible to recover the masked + +image content by utilizing the context information between different patches. Thus, we propose using a group of query tokens ${}^{2}$ to capture holistic audio context information from the audio frames with the assistance of a transformer encoder. Since these query tokens include rich context information, it is possible to reconstruct the audio based on them. Then, a transformer decoder and mask tokens are employed to reconstruct the audio from the quantized query tokens. This strategy leverages the powerful modeling capabilities of transformers to achieve better compression and semantic modeling. Similar query-based strategies has been widely explored in previous works, such as BLIP2 (Li et al., 2023), SALMONN (Tang et al., 2024) and TiTok(Yu et al., 2024). The right part of Figure 2 illustrates the overall framework of ALMTokensizer. In the following sections, we detail each component and the associated training loss. + +Patchify and UnPatchify We explore two types of Patchify modules: (1) Following Encodec (Défossez et al., 2022), a convolution-based module, which encodes the audio data $\mathbf{x}$ into $e \in \mathcal{R}^{T \times d}$ , where $T$ and $d$ denote the number of frames and the vector dimension, and (2) Following StableCodec (Parker et al., 2024), which directly uses a linear layer to encode the audio data into $e \in \mathcal{R}^{T \times d}$ and adds several transformer layers. Similarly, the UnPatchify mirrors the architecture of Patchify. If we use the Encodec-style Patchify module, the UnPatchify module substitutes stride convolutions with transposed convolutions and reverses the stride order. If we use the StableCodec-style Patchify module, the UnPatchify module includes a transformer block and a reshape operation. In our preliminary experiments, we find that the Encodec-style Patchify and UnPatchify modules bring better reconstruction performance. We adopt the Encodec-style Patchify module as our default setting. + +Token Interleaving The token interleaving module aims to combine two token sequences into a single sequence. In the encoder part, we combine the audio frames $e \in \mathcal{R}^{T \times d}$ and the query token [CLS]. 
Assuming a window size of $w$ , the query token will be inserted into the audio frame sequence at every $w$ -intervals. In the decoder part, the token interleaving module is used to combine the quantized query tokens and learnable mask tokens. We insert $w$ mask tokens before each query token. During the training stage, we dynamically choose the window size for each training iteration. + +Token Retrieval The token retrieval module aims to retrieve the relevant tokens from a sequence. In the encoder part, we use it to retrieve the learnable query tokens. In the decoder part, we use it to retrieve the learnable mask tokens. + +Query-based Transformer Encoder As the previous part + +discussed, we introduce a learnable query token $[\mathrm{cls}] \in \mathcal{R}^{1 \times d}$ to capture holistic information from the audio frames $e$ . As Figure 2 shows, we first combine the audio frames and query token using a token interleaving module with a window size $w$ . Then, a transformer module is applied to model the whole sequence $e_a$ . After that, we employ a token retrieval module to extract the query tokens $h \in \mathcal{R}^{[T / w] \times d}$ . + +$$ +\begin{array}{l} \boldsymbol {e} = P (\boldsymbol {x}), \boldsymbol {e} _ {\boldsymbol {a}} = I n t e r l e a v i n g (\boldsymbol {\mathbf {e}}, \boldsymbol {c l s}, w), \tag {1} \\ \boldsymbol {e} _ {\boldsymbol {a}} = E n (\boldsymbol {e} _ {\boldsymbol {a}}), \boldsymbol {h} = R e c t r i e v a l (\boldsymbol {e} _ {\boldsymbol {a}}, w) \\ \end{array} +$$ + +where $P(\cdot)$ denotes the Patchify module. $En(\cdot)$ denotes the transformer encoder. + +Residual Vector Quantization To build a low-bitrate audio codec, we empirically set the number of RVQ layers to 3, since we found that 3 RVQ layers suffice to build an effective audio codec model: $\hat{h} = Q(h)$ . Inspired by previous works (Zhu et al., 2024; Yang et al., 2024a), we first obtain the k-means clusters of Wav2vec2 (Baevski et al., 2020) to represent the speech semantic prior, and the k-means clusters of the BEATs (Chen et al., 2022b) to represent the general sound semantic prior. Assuming the codebook size is $C$ , we set $C / 2$ to represent speech, with the remaining portion representing general sound. We then use these semantic priors to initialize the codebook of the VQ layer and fix it. Next, we apply a linear layer to map the input features into the VQ layer. + +Query-based Transformer Decoder To recover the audio information, we construct a reverse process using the encoder part. We first use the token interleaving module to combine the mask token $m \in \mathcal{R}^{1 \times d}$ with $\hat{\pmb{h}}$ . The new sequence is then modeled by a transformer module. We expect that these mask tokens can be used to recover the audio information with the help of the Unpatchify module. + +$$ +\begin{array}{l} \boldsymbol {q} _ {\boldsymbol {a}} = \text {I n t e r l e a v i n g} (\hat {\boldsymbol {h}}, \boldsymbol {m}, w), \boldsymbol {q} _ {\boldsymbol {a}} = D e (\boldsymbol {q} _ {\boldsymbol {a}}) \tag {2} \\ \boldsymbol {e} _ {\boldsymbol {o}} = \operatorname {R e c t r i e v a l} (\boldsymbol {q} _ {\boldsymbol {a}}, w), \hat {\boldsymbol {x}} = U n P (\boldsymbol {e} _ {\boldsymbol {o}}), \\ \end{array} +$$ + +where $Unp(\cdot)$ denotes the Unpatchify module. $De(\cdot)$ denotes the transformer decoder. + +# 3.3. 
Training Loss + +Similar to previous audio CODECs, our approach is based on a GAN objective, where we optimize both the generator (which consists of the Patchify module, transformer encoder, quantizer, transformer decoder, and UnPatchify module) and the discriminators. For the generator, the training loss comprises four components: (1) reconstruction loss term; (2) adversarial loss term; (3) Masked AutoEncoder (MAE) loss; and (4) AR prediction loss. The reconstruction and adversarial losses typically follow previous works (Défossez et al., 2022; Zeghidour et al., 2021). In the following, we describe the MAE loss and AR prediction loss. More details of training loss refer to Appendix G. + +MAE Loss As we discussed in Section 1, a semantic-rich audio codec tokenizer is better suited for audio language modeling. Inspired by the success of MAE (He et al., 2022), we propose to incorporate an MAE loss during the training of the audio codec. Specifically, for the frame sequence $e$ , we randomly choose several audio frame features and set these frames to zero, $e_m = \mathrm{Mask}(e)$ . We pass the masked features $e_m$ into the encoder transformer. Then, the encoded features are passed into an MAE-decoder transformer block to predict $e$ . In our experiments, we adopt a dynamic mask rate (from 0.2 to 0.3), we found that using a large mask rate will significantly influence the reconstruction performance. Following MAE (He et al., 2022), we apply the MSE loss to the masked audio frames. + +AR Loss As shown in figure 3, we find that the first layer of RVQ-based audio codec models is easier to fit for the audio language model than the other layers (e.g., layers 2 and 3). One possible reason is that the first layer encodes more semantically related information. For speech data, most of the content information can be recovered by the first VQ layer, while the residual layers primarily encode acoustic-level information, which influences speech quality. To make the tokens in the residual layer easier to fit, we introduce an autoregressive (AR) prediction prior (Wang et al., 2024a) in the RVQ latent space. Specifically, we introduce a lightweight continuous autoregressive (AR) transformer3, which is used to conduct next-token prediction in the RVQ layer. For example, it is tasked with predicting the quantized feature of the third VQ layer based on the features of the first and second VQ layers. We use mean squared error (MSE) loss for optimization. + +# 3.4. Two-stage Training Strategy + +Although training the ALMTokenizer using the typical Encoder (Défossez et al., 2022) setting is feasible, we introduce a two-stage training paradigm to improve both reconstruction performance and semantic information. Our motivation stems from the fact that audio codec quantization focuses on modeling local relationships, whereas semantic information focuses on modeling global relationships. These two goals are in conflict. To resolve this conflict, we present a two-stage training strategy. In the first stage, we do not incorporate the quantization part; instead, we train directly an AutoEncoder with Patchify and UnPatchify modules. To encode more semantic information in the Patchify module, we introduce MAE loss during this stage, by adding transformer-based MAE-encoder and decoder. The encoder processes the masked frame sequence, and the decoder pre + +dicts the masked part. After training, the transformer encoder and decoder are discarded. 
In the second stage, we first initialize the ALMTokensizer's Patchify and UnPatchify modules with the checkpoint from the first stage, and freeze the parameters of the Patchify module. Then, we train the model using the training loss described in Section 3.3. + +# 4. Experiments + +# 4.1. Dataset and Training Details + +Data preparation for the audio codec ALMTokensizer is trained on approximately 4,500 hours of data. In the speech domain, we utilize LibriTTS training set (Zen et al., 2019) and a subset of Multilingual LibriSpeech (MLS) (Pratap et al., 2020), with 2,000 hours randomly selected. In the sound domain, we utilize a subset of AudioSet, with 1,000 hours randomly selected; in the music domain, we employ a subset of the Million Song Dataset (Bertin-Mahieux et al., 2011), also with 1,000 hours randomly selected. We evaluate the codec's speech reconstruction performance using a subset of the VCTK dataset (Veaux et al., 2017), and assess both audio and music reconstruction performance using the AudioCaps (Kim et al., 2019) validation set and the MusicCaps dataset (Agostinelli et al., 2023), respectively. + +Data for Audio Language Models To assess the effectiveness of the proposed audio tokenizer, we construct an audio language model framework to perform six audio-related tasks. The details are provided in Appendix D.3 and D.4. For speech data, we select 2,000 hours of speech-text pairs from LibriHeavy (Kang et al., 2024). For sound data, we utilize the AudioCaps training set and BBC Sound Effects. For music data, we use a subset of the Million Song dataset and the caption data from LP-MusicCaps (Doh et al., 2023). + +Implementation Details ALMTokenizer first performs patchification on the audio data, we set the patch size to 320 in all of experiments, which encodes 1 second of $24\mathrm{kHz}$ audio into 75 frames. For the Encoder-style Patchify module, we adopt the settings from Encodec (Défossez et al., 2022) encoder. To enable streaming for the codec model, a causal convolution layer is employed. For the encoder-transformer and decoder-transformer components, we use 24 self-attention layers, with latent dimensions of 256 and 512, respectively. Following StableCodec (Parker et al., 2024), the self-attention mechanism uses a causal sliding attention window of 64 steps to restrict the receptive field and promote the generalization of the architecture to sequences of arbitrary length. Rotary Positional Embeddings (RoPE) are used. Refer to Appendix G for the details of ALMTokenizer model training. For the audio language model, we follow the framework of Moshi (Défossez et al., 2024). For further details, refer to Appendix A. + +Table 1. The speech reconstruction and semantic performance comparison between the ALMTokensizer and previous tokenizers. FPS denotes that the frame number in one second. TPS denotes that the token number in one second. CS denotes the codebook size, BR denotes the bit-rate. ST denotes speechtokenizer. Bold for the best result and underline for the second-best result. Evaluation on VCTK dataset. + +
Reconstruction metrics: UTMOS, DNS-MOS, VISQOL, STOI, PESQ; semantic metrics: ASR, ER.

| Models | FPS/TPS | CS/BR | UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | ASR (↓) | ER (↑) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Hubert (Hsu et al., 2021) | - | - | - | - | - | - | - | 6.5 | 31.0 |
| WavLM (Chen et al., 2022a) | - | - | - | - | - | - | - | 6.2 | 29.0 |
| Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 2.58 | 3.27 | 3.64 | 0.81 | 2.0 | 35.3 | 26.5 |
| DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 3.13 | 3.41 | 3.67 | 0.81 | 2.1 | 44.1 | 17.6 |
| Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 3.67 | 3.50 | 3.72 | 0.79 | 1.9 | 44.6 | 19.8 |
| StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 4.22 | 3.64 | 3.40 | 0.76 | 1.8 | 98.3 | 15.8 |
| ST (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 3.41 | 3.36 | 3.68 | 0.79 | 1.7 | 19.8 | 27.0 |
| Mimi (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 3.01 | 3.14 | 3.28 | 0.75 | 1.5 | 25.1 | 28.0 |
| Mimi (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 3.65 | 3.38 | 3.82 | 0.82 | 2.1 | 23.8 | 28.3 |
| ALMTokensizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0 |
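The CS/BR column above follows directly from the token rate: a token drawn from a codebook of size $C$ carries $\log_2 C$ bits, so the bitrate is roughly TPS $\times \log_2 C$. A minimal sanity check against the table (the helper name is illustrative, not from the paper):

```python
import math

def bitrate_bps(tokens_per_second: float, codebook_size: int) -> float:
    """Bits per second implied by a discrete tokenizer: each token carries log2(codebook_size) bits."""
    return tokens_per_second * math.log2(codebook_size)

# (tokens/s, codebook size) taken from Table 1
for name, tps, cs in [("Encodec", 150, 1024),        # 1500 bps  = 1.5 kbps
                      ("Wavtokenizer", 40, 4096),     # 480 bps   = 0.48 kbps
                      ("ALMTokenizer", 37.5, 2048)]:  # 412.5 bps ≈ 0.41 kbps
    print(f"{name}: {bitrate_bps(tps, cs):.1f} bps")
```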
+ +# 4.2. Evaluation Metrics + +We evaluate the performance of previous SOTA audio tokenizers, and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks. + +Audio Reconstruction For speech reconstruction, we use DNS-MOS, UT-MOS, PESQ, STOI (Short-time Objective Intelligibility), and VISQOL. For sound and music data evaluation, VISQOL (audio version), STFT loss, and Mel loss are used. Furthermore, following (Kumar et al., 2023), the MUSHRA subjective test is conducted for speech, sound, and music. Refer to Appendix D for more details. + +Audio Semantic Information Previous SSL models, such as Hubert (Hsu et al., 2021), have shown that semantic-rich representation can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Thus, we can validate the performance of features of the audio tokenizer for downstream recognition tasks. For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset, and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For sound data, we conduct sound classification tasks on the ESC-50 dataset (Piczak, 2015). For music data, we conduct music classification tasks on the Medley-solos-DB dataset (Lostanlen & Cella, 2016). + +Audio Understanding To further validate whether the audio tokenizer is suitable for building an audio language model, we propose to conduct an understanding task using discrete tokens. We conduct three tasks: ASR, audio caption, and music caption. For the audio data, we use the audio tokenizer to transform it into discrete tokens, and for text data, we use the BPE tokenizer of LLAMA 3.2. For audio and music caption, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics. + +Table 2. The sound reconstruction performance comparison between the proposed ALMTokensizer and previous audio tokenizer models. SC denotes the sound classification task. Evaluation on AudioCaps validation set. + +
| Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | SC (↑) |
| --- | --- | --- | --- | --- |
| BEATs | - | - | - | 24% |
| Wav2vec2 | - | - | - | 53% |
| Encodec | 3.05 | 16.3 | 1.23 | 15% |
| DAC | 2.98 | 17.6 | 1.24 | 20% |
| Wavtokenizer | 2.18 | 32.7 | 2.50 | 12% |
| Ours | 2.99 | 15.0 | 1.24 | 44% |
+ +Table 3. The music reconstruction and semantic performance comparison between the ALMTokensizer and previous audio tokenizers. MC denotes the music classification task. Evaluation on Musiccaps dataset. + +
| Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | MC (↑) |
| --- | --- | --- | --- | --- |
| BEATs | - | - | - | 54% |
| Wav2vec2 | - | - | - | 65% |
| Encodec | 4.04 | 34.8 | 1.26 | 45% |
| DAC | 4.06 | 35.9 | 1.28 | 48% |
| Wavtokenizer | 3.85 | 48.2 | 1.47 | 54% |
| Ours | 3.96 | 34.4 | 1.32 | 59% |
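For the intrusive speech metrics listed in Section 4.2 (PESQ and STOI), the paper does not state which implementation it uses; the sketch below shows one common option based on the `pesq` and `pystoi` packages, resampling the 24 kHz signals to 16 kHz for wideband PESQ. Treat it as an assumed setup rather than the authors' evaluation code.

```python
import librosa
from pesq import pesq
from pystoi import stoi

def speech_metrics(ref_path: str, deg_path: str) -> dict:
    # Both metrics compare the codec reconstruction against the reference waveform.
    ref, _ = librosa.load(ref_path, sr=24000)
    deg, _ = librosa.load(deg_path, sr=24000)
    n = min(len(ref), len(deg))            # align lengths after the codec round-trip
    ref, deg = ref[:n], deg[:n]
    # Wideband PESQ is defined at 16 kHz, so resample the 24 kHz signals first.
    ref16 = librosa.resample(ref, orig_sr=24000, target_sr=16000)
    deg16 = librosa.resample(deg, orig_sr=24000, target_sr=16000)
    return {
        "pesq": pesq(16000, ref16, deg16, "wb"),
        "stoi": stoi(ref, deg, 24000, extended=False),
    }
```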
+ +Audio Generation We also conduct audio generation tasks, including text-to-speech, text-to-sound, and text-to-music. Refer to Appendix D for more details. + +# 4.3. The Reconstruction and Semantic Performance + +We first compare the reconstruction and semantic performance of ALMTokensizer with previous audio tokenizers. Table 1 presents the speech reconstruction and semantic results. We observe the following: (1) In terms of reconstruction, ALMTokensizer achieves impressive results in the low-bitrate setting. For example, compared with previous SOTA models, MimiCodec and Wavtokenizer, ALMTokensizer achieves better reconstruction performance at a lower bitrate. We also note that StableCodec performs well on UT- + +Table 4. The LM-based TTS and ASR results. The first three metrics are used for TTS, while the last one is used for ASR. GLM4-Voice (Zeng et al., 2024) is a single layer semantic tokenizer. Evaluation on LibriSpeech test clean set. + +
| Models | WER (↓) | DNSMOS (↑) | UT-MOS (↑) | ASR (↓) |
| --- | --- | --- | --- | --- |
| GLM4-voice | 9.9 | 3.96 | 3.79 | 16.3 ± 1.5 |
| DAC | 24.5 | 3.14 | 2.06 | 58.4 ± 1.2 |
| Encodec | 22.9 | 3.48 | 2.14 | 77.2 ± 2.3 |
| StableCodec | 22.7 | 3.63 | 3.70 | 28.0 ± 1.9 |
| Wavtokenizer | 18.5 | 3.72 | 3.58 | 45.6 ± 2.7 |
| MimiCodec | 16.0 | 3.67 | 2.93 | 23.1 ± 1.5 |
| Ours | 11.7 | 3.75 | 3.88 | 19.6 ± 1.8 |
+ +MOS. The main reason is that StableCodec has denoising capabilities, while the original audio includes some noise. This explains why StableCodec achieves good results on UTMOS but performs poorly on PESQ and STOI. (2) In terms of semantic information, ALMTokensizer demonstrates superior performance, e.g., ALMTokensizer outperforms previous SOTA models, such as Wavtokenizer and StableCodec $^{4}$ . Notably, in the emotion classification task, ALMTokensizer achieves performance comparable to previous SSL models, such as Hubert and WavLM. However, we also note that ALMTokensizer still lags behind these SSL models in ASR performance. We speculate that the inclusion of acoustic information may detract from ASR performance, despite ALMTokensizer containing rich semantic information. Table 2 and 3 show the sound and music experimental results. We can see that ALMTokensizer demonstrates strong reconstruction performance under the low-bitrate setting. Compared to WavTokenizer, the reconstruction performance shows significant improvement. Furthermore, we also note that sound and music are inherently more complex than speech, and encoding them at very low-bitrate remains a challenge. In terms of semantic information, ALMTokensizer significantly surpasses previous works, such as WavTokenizer and Encodec. In comparison with SSL models, BEATs (Chen et al., 2022b) and Wav2vec2-audioset version, ALMTokensizer shows comparable performance. We also perform the MUSHRA subjective test for the reconstruction performance. As shown in Table 7, we find that ALMTokensizer effectively maintains strong subjective reconstruction performance on speech, music, and audio, even with a very low-bitrate setting. + +# 4.4. Audio Understanding and Generation Results + +Speech Understanding and Generation Tasks Table 4 shows the LM-based TTS and ASR results. For the TTS task, we mainly focus on robustness and speech quality. In terms of robustness, we can see that the GLM4-voice tokenizer (Zeng et al., 2024), MimiCodec, and the pro + +![](images/d14fe6162b3ac4c03543d5fc61d9d14b856032b8180431b1c829610ad257161d.jpg) +Figure 3. The performance comparison with or without AR loss. + +posed ALMTokensizer bring better performance than others, highlighting the importance of semantic information for LM-based speech generation. Compared to previous audio codec tokenizers, ALMTokensizer brings significant improvement. In terms of generated speech quality, ALMTokensizer also shows great advantages, further demonstrating that the proposed tokenizer is more suitable for audio language modeling. Similarly, when we conduct the ASR task using discrete tokens as input, semantic information is also important. Traditional audio codec models perform poorly in this setting, such as DAC, Encodec, and WavTokenizer. StableCodec was fine-tuned by using a CTC head to predict the force-aligned phoneme tags from pre-bottleneck latents. MimiCodec distills the semantic information from WavLM. Thus, they have better performance than previous codec models. In ALMTokensizer, we propose a novel codec framework and training loss to better encode semantic information in the codec model. + +Sound/music Understanding and Generation Results We conduct text-to-sound, text-to-music, audio caption and music caption tasks within the same audio language model framework. The experimental results shown in Table 5 indicate that ALMTokensizer shows better performance in both audio caption and audio generation tasks, further demonstrating its advantages. 
We put more audio tokenizer reconstruction performance experiments on Appendix F, including evaluation on LibriTTS test set, length generalization, and compared to diffusion-based audio codec models. + +# 4.5. Ablation Study + +In order to gain a more comprehensive understanding of ALMTokensizer, we systematically compared each key component using a controlled experimental setup, employing identical architectures and hyperparameters across all trials. The Effectiveness of Query-based Audio Compression In this study, we propose a query-based audio compression strategy for compressing audio data in a very low-bitrate + +Table 5. The LM-based sound, music understanding and generation. B1, B2, B3, RG, ME, CD, SP, and SD denote BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER, respectively. Evaluation on Audiocaps and Musiccaps datasets. + +
Understanding metrics: B1, B2, B3, ME, RG, CD, SP, SD; generation metrics: FD, FAD, KL.

| Models | B1 (↑) | B2 (↑) | B3 (↑) | ME (↑) | RG (↑) | CD (↑) | SP (↑) | SD (↑) | FD (↓) | FAD (↓) | KL (↓) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **Sound Task** | | | | | | | | | | | |
| Encodec | 0.25 | 0.15 | 0.08 | 0.11 | 0.24 | 0.57 | 0.14 | 0.35 | 10.03 | 8.22 | 1.73 |
| DAC | 0.26 | 0.15 | 0.08 | 0.11 | 0.26 | 0.51 | 0.13 | 0.32 | 14.14 | 11.7 | 1.55 |
| Wavtokenizer | 0.24 | 0.14 | 0.08 | 0.10 | 0.22 | 0.38 | 0.11 | 0.25 | 6.76 | 4.55 | 1.28 |
| ALMTokensizer (Ours) | 0.28 | 0.17 | 0.11 | 0.12 | 0.24 | 0.60 | 0.15 | 0.37 | 4.11 | 6.16 | 0.55 |
| **Music Task** | | | | | | | | | | | |
| Encodec | 0.30 | 0.14 | 0.08 | 0.11 | 0.23 | 0.37 | 0.09 | 0.23 | 7.22 | 5.48 | 1.06 |
| DAC | 0.29 | 0.14 | 0.08 | 0.11 | 0.23 | 0.37 | 0.09 | 0.23 | 12.89 | 8.36 | 1.68 |
| Wavtokenizer | 0.19 | 0.06 | 0.02 | 0.06 | 0.13 | 0.06 | 0.05 | 0.05 | 4.39 | 11.93 | 0.88 |
| ALMTokensizer (Ours) | 0.34 | 0.15 | 0.07 | 0.13 | 0.25 | 0.44 | 0.10 | 0.27 | 3.55 | 4.58 | 0.43 |
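Several ablation rows in Table 6 below (e.g., "w/o the query-based framework") remove the interleaving mechanism of Section 3.2, so a minimal sketch of the two encoder-side operations may help; the tensor shapes, window size, and function names here are illustrative and not the authors' released implementation.

```python
import torch

def interleave_queries(frames: torch.Tensor, cls_token: torch.Tensor, w: int) -> torch.Tensor:
    """Insert one learnable query ([CLS]) token after every window of w audio frames.

    frames: (B, T, D) with T divisible by w; cls_token: (1, 1, D)."""
    B, T, D = frames.shape
    x = frames.reshape(B, T // w, w, D)
    cls = cls_token.expand(B, T // w, 1, D)
    return torch.cat([x, cls], dim=2).reshape(B, -1, D)   # (B, T + T // w, D)

def retrieve_queries(x: torch.Tensor, w: int) -> torch.Tensor:
    """Pull the query tokens back out of an interleaved sequence (inverse of the step above)."""
    B, new_T, D = x.shape
    idx = torch.arange(w, new_T, w + 1)                    # positions of the [CLS] tokens
    return x[:, idx, :]                                    # (B, T // w, D)

# Example: a 2-second clip at 75 frames/s with window w = 6 yields 25 query tokens (12.5 Hz).
frames = torch.randn(1, 150, 256)
cls = torch.nn.Parameter(torch.zeros(1, 1, 256))
h = retrieve_queries(interleave_queries(frames, cls, w=6), w=6)
print(h.shape)  # torch.Size([1, 25, 256])
```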
+ +Table 6. Ablation study of codec framework, training loss, and training strategy. ASR and ER are used to evaluate the semantic information. The others are used to evaluate the reconstruction performance. Experiments conduct on VCTK dataset. + +
| Setting | UTMOS (↑) | DNSMOS (↑) | VISQOL (↑) | PESQ (↑) | STOI (↑) | ASR (↓) | ER (↑) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| ALMTokensizer | 3.76 | 3.64 | 3.78 | 2.0 | 0.81 | 18.3 | 29.0 |
| *Framework ablation* | | | | | | | |
| w/o the query-based framework | 2.49 | 3.13 | 3.37 | 1.58 | 0.77 | 34.5 | 22.6 |
| w/o Three additional loss | 3.54 | 3.41 | 3.44 | 1.69 | 0.78 | 27.2 | 24.5 |
| *Training loss ablation* | | | | | | | |
| w/o semantic prior for VQ | 3.79 | 3.66 | 3.78 | 2.12 | 0.83 | 19.2 | 28.4 |
| w/o MAE loss | 3.70 | 3.76 | 3.83 | 2.10 | 0.82 | 24.5 | 23.2 |
| w/o AR loss | 3.72 | 3.81 | 3.80 | 2.08 | 0.82 | 18.8 | 30.2 |
| *Different Patchify module* | | | | | | | |
| use Linear-Patchify | 3.47 | 3.36 | 3.27 | 1.78 | 0.78 | 20.3 | 26.7 |
| *Training strategy ablation* | | | | | | | |
| w/o two-stage training | 3.60 | 3.39 | 3.24 | 1.55 | 0.74 | 22.8 | 25.9 |
+ +setting. To validate its effectiveness, we follow previous audio codec models, such as MimiCodec (Défossez et al., 2024). In the encoder part, we use a stride size of [8, 6, 5, 4, 2] to compress 1-second, $24\mathrm{kHz}$ audio into $12.5\mathrm{Hz}$ , followed by applying 3 RVQ layers to quantize it. As shown in Table 6, using previous audio codec frameworks makes it difficult to maintain good reconstruction performance in very low-bitrate settings. As a result, the proposed query-based compression method is more effective in this setting. + +The Influence of Semantic Prior for VQ To explore the influence of semantic priors on the audio codec model, we conduct an experiment where we remove the semantic prior and instead train a learnable RVQ following Encodec. As shown in Table 6, we find that updating the RVQ layer improves reconstruction performance but reduces semantic information, demonstrating that integrating semantic priors into the VQ layer enhances semantic information. + +The Influence of MAE Loss We also conduct experiments to evaluate the effectiveness of the MAE loss. As shown in Table 6, we find that the MAE loss is crucial for enhancing the semantic information in the codec model. Although the MAE loss has a slight negative effect on reconstruction, it is a crucial factor in building a better audio tokenizer. + +The Influence of AR Loss From Table 6, we observe that adding the AR loss reduces reconstruction performance. In Figure 3, we compare token prediction accuracy and TTS performance with and without LM loss. We observe that using LM loss significantly improves token prediction accuracy, particularly for the second and third VQ layers, which shows the effectiveness of our motivation and solution. + +The Influence of Two-stage Training As Table 6 shows, the two-stage training strategy is crucial as it significantly improves reconstruction performance and semantic information in the codec model. The Influence of Patchify Module We investigate two types of Patchify modules: Encode-style and StableCodec-style. As shown in Table 6, using Encode-style Patchify modules yields better performance. One possible reason is that StableCodec-style Patchify modules (Parker et al., 2024) may depend on larger data and model parameters, as the original paper scales their model to 1B. In contrast, we use only four transformer layers to ensure a fair comparison with Encode-style modules. Due to page limitations, we defer the ablation study on the influence of window size $w$ in query-based compression, codebook size, the influence of mask-rate, and model size on reconstruction to Appendix C. + +Table 7. The subjective reconstruction results using MUSHRA (comparative scoring of samples) of codec models on speech, sound and music. Bold for the best result and underline for the second-best result. + +
| Models | FPS/TPS | CS/BR | Speech (↑) | Sound (↑) | Music (↑) |
| --- | --- | --- | --- | --- | --- |
| *Speech* | | | | | |
| MimiCodec (3 RVQ) (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 65.61 ± 5.2 | - | - |
| MimiCodec (8 RVQ) (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 86.7 ± 2.3 | - | - |
| StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 81.7 ± 4.4 | - | - |
| SpeechTokenizer (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 73.7 ± 4.6 | - | - |
| *Audio* | | | | | |
| Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 75.1 ± 3.9 | 77.2 ± 4.2 | 73.7 ± 4.6 |
| DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 79.3 ± 4.2 | 71.3 ± 4.1 | 71.3 ± 4.1 |
| Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 84.0 ± 2.1 | 63.1 ± 4.6 | 54.1 ± 5.4 |
| Ours | 12.5/37.5 | 2048/0.41kbps | 84.8 ± 3.7 | 72.4 ± 4.7 | 69.0 ± 4.5 |
+ +# 4.6. Discussion + +In this section, we discuss two fundamental questions in audio tokenization. Question 1: Is a single quantization layer better than multiple quantization layers? Question 2: Does a low-bit rate with high reconstruction performance define a good audio tokenizer? + +Question 1 Although WavTokenizer and StableCodec demonstrate the potential to build a low-bitrate audio codec tokenizer with a single quantization layer, they rely on a higher frame rate (e.g., 25 or $40\mathrm{Hz}$ ). As shown in Figure 1, a lower frame rate (e.g., $12.5\mathrm{Hz}$ ) is critical for improving training efficiency. Thanks to UniAudio (Yang et al., 2023c) and Moshi's (Défossez et al., 2024) audio language model framework, multiple quantization layers do not increase the sequence length. Therefore, multiple quantization layers present an effective approach for building a low-bitrate, semantically rich audio codec. + +Question 2 To address this question, we present two comparisons. First, as shown in Tables 4 and 1, StableCodec exhibits better reconstruction performance and a lower bit-rate compared to WavTokenizer. However, when applied to the text-to-speech generation task, WavTokenizer demonstrates better robustness. One possible reason for this is that StableCodec uses a large-scale codebook size (46,656), which may increase the modeling complexity. Second, although MimiCodec has a higher bit-rate and poorer reconstruction performance than StableCodec, it demonstrates more stable TTS generation performance and better ASR performance. This phenomenon further underscores the importance of semantic information. In summary, a good audio tokenizer for an audio language model should not only consider low-bitrate and reconstruction, but also account for the semantic information in the codec model. + +# 5. Conclusion + +In this study, we present a low-bitrate, semantically rich audio codec tokenizer. Specifically, we propose a query-based + +compression strategy to effectively compress the audio data into a low-bitrate format while incorporating more semantic information. Furthermore, we introduce several training losses to enhance semantic information, including MAE loss and AR loss. Extensive experiments demonstrate the effectiveness of ALMTokensizer. Within the same audio language modeling framework, ALMTokensizer exhibits superior performance in both understanding and generation tasks. We discuss the limitation of this study in Appendix I. + +# Ethical Statement + +This paper presents an audio tokenizer for audio language models, which can be applied to various audio generation tasks, such as text-to-speech and text-to-music. There is potential for misuse in generating misinformation, deepfake audio, or other harmful content. We advocate for the development of a detection model to identify audio produced by the codec model and generated by other generative models. + +# References + +Agostinelli, A., Denk, T. I., Borsos, Z., Engel, J., Verzetti, M., Caillon, A., Huang, Q., Jansen, A., Roberts, A., Tagliasacchi, M., et al. Musicl: Generating music from text. arXiv preprint arXiv:2301.11325, 2023. +Ai, Y., Jiang, X.-H., Lu, Y.-X., Du, H.-P., and Ling, Z.-H. Apocodec: A neural audio codec with parallel amplitude and phase spectrum encoding and decoding. arXiv preprint arXiv:2402.10533, 2024. +Baevski, A., Zhou, Y., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in neural information processing systems, 33:12449-12460, 2020. 
+Banerjee, A. and Arora, V. wav2tok: Deep sequence tokenizer for audio retrieval. In The Eleventh International Conference on Learning Representations, 2022. + +Bertin-Mahieux, T., Ellis, D. P., Whitman, B., and Lamere, P. The million song dataset. In Proceedings of the 12th International Conference on Music Information Retrieval (ISMIR 2011), 2011. +Borsos, Z., Marinier, R., Vincent, D., Kharitonov, E., Pietquin, O., Sharifi, M., Roblek, D., Teboul, O., Grangier, D., Tagliasacchi, M., et al. Audiolm: a language modeling approach to audio generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023a. +Borsos, Z., Sharifi, M., Vincent, D., Kharitonov, E., Zeghidour, N., and Tagliasacchi, M. Soundstorm: Efficient parallel audio generation. arXiv preprint arXiv:2305.09636, 2023b. +Chen, S., Wang, C., Chen, Z., Wu, Y., Liu, S., Chen, Z., Li, J., Kanda, N., Yoshioka, T., Xiao, X., et al. Wavlm: Large-scale self-supervised pre-training for full stack speech processing. IEEE Journal of Selected Topics in Signal Processing, 16(6):1505-1518, 2022a. +Chen, S., Wu, Y., Wang, C., Liu, S., Tompkins, D., Chen, Z., and Wei, F. Beats: Audio pre-training with acoustic tokenizers. arXiv preprint arXiv:2212.09058, 2022b. +Copet, J., Kreuk, F., Gat, I., Remez, T., Kant, D., Synnaeve, G., Adi, Y., and Defossez, A. Simple and controllable music generation. arXiv preprint arXiv:2306.05284, 2023. +Costantini, G., Iaderola, I., Paoloni, A., Todisco, M., et al. Emovo corpus: an italian emotional speech database. In Proceedings of the ninth international conference on language resources and evaluation (LREC'14), pp. 3501-3504. European Language Resources Association (ELRA), 2014. +Défossez, A., Copet, J., Synnaeve, G., and Adi, Y. High fidelity neural audio compression. arXiv preprint arXiv:2210.13438, 2022. +Défossez, A., Mazaré, L., Orsini, M., Royer, A., Pérez, P., Jégou, H., Grave, E., and Zeghidour, N. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024. +Doh, S., Choi, K., Lee, J., and Nam, J. Lp-musiccaps: Llm-based pseudo music captioning. arXiv preprint arXiv:2307.16372, 2023. +Drossos, K., Lipping, S., and Virtanen, T. Clotho: An audio captioning dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 736-740. IEEE, 2020. +Du, Z., Chen, Q., Zhang, S., Hu, K., Lu, H., Yang, Y., Hu, H., Zheng, S., Gu, Y., Ma, Z., et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer + +based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024. +Hao, H., Zhou, L., Liu, S., Li, J., Hu, S., Wang, R., and Wei, F. Boosting large language model for speech synthesis: An empirical study. arXiv preprint arXiv:2401.00246, 2023. +He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 16000-16009, 2022. +Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. +Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021. +Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., and Chen, W. 
Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. +Huang, P.-Y., Xu, H., Li, J., Baevski, A., Auli, M., Galuba, W., Metze, F., and Feichtenhofer, C. Masked autoencoders that listen. Advances in Neural Information Processing Systems, 35:28708-28720, 2022. +Ji, S., Jiang, Z., Wang, W., Chen, Y., Fang, M., Zuo, J., Yang, Q., Cheng, X., Wang, Z., Li, R., et al. Wavtokenizer: an efficient acoustic discrete codec tokenizer for audio language modeling. arXiv preprint arXiv:2408.16532, 2024. +Ju, Z., Wang, Y., Shen, K., Tan, X., Xin, D., Yang, D., Liu, Y., Leng, Y., Song, K., Tang, S., et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. arXiv preprint arXiv:2403.03100, 2024. +Kang, W., Yang, X., Yao, Z., Kuang, F., Yang, Y., Guo, L., Lin, L., and Povey, D. Libriheavy: a 50,000 hours asr corpus with punctuation casing and context. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 10991-10995. IEEE, 2024. +Kharitonov, E., Vincent, D., Borsos, Z., Marinier, R., Girgin, S., Pietquin, O., Sharifi, M., Tagliasacchi, M., and Zeghidour, N. Speak, read and prompt: High-fidelity text-to-speech with minimal supervision. arXiv preprint arXiv:2302.03540, 2023. + +Kim, C. D., Kim, B., Lee, H., and Kim, G. Audiocaps: Generating captions for audios in the wild. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 119-132, 2019. +Kreuk, F., Synnaeve, G., Polyak, A., Singer, U., Défossez, A., Copet, J., Parikh, D., Taigman, Y., and Adi, Y. Audiogen: Textually guided audio generation. arXiv preprint arXiv:2209.15352, 2022. +Kumar, R., Seetharaman, P., Luebs, A., Kumar, I., and Kumar, K. High-fidelity audio compression with improved RVQGAN. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=qjnl1QUUnFA. +La Quatra, M., Koudounas, A., Vaiani, L., Baralis, E., Cagliero, L., Garza, P., and Siniscalchi, S. M. Benchmarking representations for speech, music, and acoustic events. In 2024 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICAS-SPW), pp. 505-509, 2024. doi: 10.1109/ICASSPW62465.2024.10625960. +Li, H., Xue, L., Guo, H., Zhu, X., Lv, Y., Xie, L., Chen, Y., Yin, H., and Li, Z. Single-codec: Single-codebook speech codec towards high-performance speech generation. arXiv preprint arXiv:2406.07422, 2024. +Li, J., Li, D., Savarese, S., and Hoi, S. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730-19742. PMLR, 2023. +Lipman, Y., Chen, R. T., Ben-Hamu, H., Nickel, M., and Le, M. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. +Liu, H., Xu, X., Yuan, Y., Wu, M., Wang, W., and Plumbley, M. D. Semanticodec: An ultra low bitrate semantic audio codec for general sound. arXiv preprint arXiv:2405.00233, 2024. +Lostanlen, V. and Cella, C.-E. Deep convolutional networks on the pitch spiral for musical instrument recognition. arXiv preprint arXiv:1605.06644, 2016. +Mei, X., Meng, C., Liu, H., Kong, Q., Ko, T., Zhao, C., Plumbley, M. D., Zou, Y., and Wang, W. Wavcaps: A chatgpt-assisted weakly-labelled audio captioning dataset for audio-language multimodal research. arXiv preprint arXiv:2303.17395, 2023. +Nguyen, T. 
A., Muller, B., Yu, B., Costa-Jussa, M. R., Elbayad, M., Popuri, S., Ropers, C., Duquenne, P.-A., Algayres, R., Mavlyutov, R., et al. Spirit-lm: Interleaved + +spoken and written language model. Transactions of the Association for Computational Linguistics, 13:30-52, 2025. +OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2204.06125, 2023. +Panayotov, V., Chen, G., Povey, D., and Khudanpur, S. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015. +Parker, J. D., Smirnov, A., Pons, J., Carr, C., Zukowski, Z., Evans, Z., and Liu, X. Scaling transformers for low-bitrate high-quality speech coding. arXiv preprint arXiv:2411.19842, 2024. +Piczak, K. J. Esc: Dataset for environmental sound classification. In Proceedings of the 23rd ACM international conference on Multimedia, pp. 1015-1018, 2015. +Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. arXiv preprint arXiv:2012.03411, 2020. +Reddy, C. K., Gopal, V., and Cutler, R. Dnsmos p. 835: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 886-890. IEEE, 2022. +Saeki, T., Xin, D., Nakata, W., Koriyama, T., Takamichi, S., and Saruwatari, H. Utmos: Utokyo-sarulab system for voicemos challenge 2022. arXiv preprint arXiv:2204.02152, 2022. +Siuzdak, H. Vocos: Closing the gap between time-domain and fourier-based neural vocoders for high-quality audio synthesis. arXiv preprint arXiv:2306.00814, 2023. +Tang, C., Yu, W., Sun, G., Chen, X., Tan, T., Li, W., Lu, L., MA, Z., and Zhang, C. SALMONN: Towards generic hearing abilities for large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=14rn7HpKVk. +van Niekerk, B., Zäïdi, J., Carbonneau, M.-A., and Kamper, H. Spoken-term discovery using discrete speech units. arXiv preprint arXiv:2408.14390, 2024. +Veaux, C., Yamagishi, J., MacDonald, K., et al. Cstr vctk corpus: English multi-speaker corpus for cstr voice cloning toolkit. University of Edinburgh. The Centre for Speech Technology Research (CSTR), 6:15, 2017. + +Wang, C., Chen, S., Wu, Y., Zhang, Z., Zhou, L., Liu, S., Chen, Z., Liu, Y., Wang, H., Li, J., et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023. +Wang, H., Suri, S., Ren, Y., Chen, H., and Shrivastava, A. Larp: Tokenizing videos with a learned autoregressive generative prior. arXiv preprint arXiv:2410.21264, 2024a. +Wang, Y., Chen, H., Yang, D., Yu, J., Weng, C., Wu, Z., and Meng, H. Consistent and relevant: Rethink the query embedding in general sound separation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 961-965. IEEE, 2024b. +Wang, Y., Chen, H., Yang, D., Li, W., Luo, D., Li, G., Yang, S., Wu, Z., Meng, H., and Wu, X. Unisep: Universal target audio separation with language models at scale. arXiv preprint arXiv:2503.23762, 2025. +Wu, H., Kanda, N., Eskimez, S. E., and Li, J. Ts3-codec: Transformer-based simple streaming single codec. arXiv preprint arXiv:2411.18803, 2024. +Yang, D., Liu, S., Huang, R., Lei, G., Weng, C., Meng, H., and Yu, D. Instructts: Modelling expressive tts in discrete latent space with natural language style prompt. 
arXiv preprint arXiv:2301.13662, 2023a. +Yang, D., Liu, S., Huang, R., Tian, J., Weng, C., and Zou, Y. Hifi-codec: Group-residual vector quantization for high fidelity audio codec. arXiv preprint arXiv:2305.02765, 2023b. +Yang, D., Tian, J., Tan, X., Huang, R., Liu, S., Chang, X., Shi, J., Zhao, S., Bian, J., Wu, X., et al. Uniaudio: An audio foundation model toward universal audio generation. arXiv preprint arXiv:2310.00704, 2023c. +Yang, D., Yu, J., Wang, H., Wang, W., Weng, C., Zou, Y., and Yu, D. Diffsound: Discrete diffusion model for text-to-sound generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023d. +Yang, D., Guo, H., Wang, Y., Huang, R., Li, X., Tan, X., Wu, X., and Meng, H. Uniaudio 1.5: Large language model-driven audio codec is a few-shot audio task learner. arXiv preprint arXiv:2406.10056, 2024a. +Yang, D., Huang, R., Wang, Y., Guo, H., Chong, D., Liu, S., Wu, X., and Meng, H. Simplespeech 2: Towards simple and efficient text-to-speech with flow-based scalar latent transformer diffusion models. arXiv preprint arXiv:2408.13893, 2024b. + +Yang, D., Wang, D., Guo, H., Chen, X., Wu, X., and Meng, H. Simplespeech: Towards simple and efficient text-to-speech with scalar latent transformer diffusion models. arXiv preprint arXiv:2406.02328, 2024c. +Yang, S.-w., Chi, P.-H., Chuang, Y.-S., Lai, C.-I. J., Lakhotia, K., Lin, Y. Y., Liu, A. T., Shi, J., Chang, X., Lin, G.-T., et al. Superb: Speech processing universal performance benchmark. arXiv preprint arXiv:2105.01051, 2021. +Yu, Q., Weber, M., Deng, X., Shen, X., Cremers, D., and Chen, L.-C. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024. +Zeghidour, N., Luebs, A., Omran, A., Skoglund, J., and Tagliasacchi, M. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30:495-507, 2021. +Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019. +Zeng, A., Du, Z., Liu, M., Wang, K., Jiang, S., Zhao, L., Dong, Y., and Tang, J. Glm-4-voice: Towards intelligent and human-like end-to-end spoken chatbot. arXiv preprint arXiv:2412.02612, 2024. +Zhang, X., Zhang, D., Li, S., Zhou, Y., and Qiu, X. Speechtokenizer: Unified speech tokenizer for speech large language models. arXiv preprint arXiv:2308.16692, 2023. +Zhu, L., Wei, F., Lu, Y., and Chen, D. Scaling the codebook size of vqgan to 100,000 with a utilization rate of $99\%$ . arXiv preprint arXiv:2406.11837, 2024. + +![](images/1c1b8a8c84d828412e41619aab208a4b72ed13470a39bdd8ec78692ea2ef9051.jpg) +Figure 4. The left diagram illustrates the framework of the audio language model, which includes a pre-trained LLM, a LoRA module, and a depth transformer. The audio language model can process both text and audio streaming inputs and generate corresponding text and audio outputs. The right diagram provides details of hierarchical audio modeling. + +![](images/6c46a09a51c2d1af91d3429bf4ad54706550bf382847ad3d6090b3a56ed70075.jpg) + +# A. The details of audio language model framework + +In this section, we provide details of the audio language model. We follow the framework of UniAudio (Yang et al., 2023c) and Moshi (Défossez et al., 2024), which combines a pre-trained LLM with a smaller Transformer model to predict audio tokens in a hierarchical manner. 
In their original paper, both the LLM and the small Transformer are updated during the training process. Due to resource limitations, and following (Hao et al., 2023), we incorporate LoRA (Hu et al., 2021) into the LLM model. For the LLM model, we use the LLAMA3.2 1B version. During training, we update only the LoRA module and the small Transformer. + +LORA setting For the LoRA module, we add LoRA parameters to the self-attention and linear layers. We set $lora_{r} = 32$ and $lora_{alpha} = 16$ . + +Depth Transformer setting For the depth transformer, we use 6 self-attention layer. We set the attention head number as 32. The attention dimension is the same as the LLAMA 3.2 1B. + +# B. The details of the influence of bitrate and semantic information for audio language model. + +In this section, we provide details of the validation experiments to explore the influence of bitrate and semantic information on audio language models. Following AudioLM (Borsos et al., 2023a), we construct an audio token pre-training task similar to text pre-training, where the model is tasked with predicting the next audio token based on the previous token sequence. + +# B.1. Training data + +We conduct the experiments on 2000 hours speech data, these data is selected from MLS dataset (Pratap et al., 2020). + +# B.2. Test data + +We evaluate on LibriSpeech test clean set. + +Table 8. The reconstruction performance of different frame rate of audio tokenizers. + +

| Version | Bitrate (↓) | FPS (↓) | Codebook size | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 50 Hz | 1650 bps | 50 | 2048 | 2.22 | 3.69 | 3.63 | 0.86 |
| 25 Hz | 825 bps | 25 | 2048 | 2.07 | 3.56 | 3.61 | 0.83 |
| 12.5 Hz | 412.5 bps | 12.5 | 2048 | 1.58 | 2.49 | 3.37 | 0.77 |
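For reference, the frame rates and bitrates in Table 8 follow directly from the down-sampling factors and the RVQ settings listed in Appendix B.4 below. The short check below is plain Python written for this write-up, not part of the training code.

```python
import math

sample_rate = 24_000                      # all codec versions operate on 24 kHz speech
n_rvq, codebook_size = 3, 2048
bits_per_code = math.log2(codebook_size)  # 11 bits per RVQ code

for name, factors in {"V1": [2, 5, 6, 8],
                      "V2": [4, 5, 6, 8],
                      "V3": [2, 4, 5, 6, 8]}.items():
    hop = math.prod(factors)                       # samples per encoded frame
    frame_rate = sample_rate / hop                 # 50, 25, 12.5 Hz
    bitrate = frame_rate * n_rvq * bits_per_code   # 1650, 825, 412.5 bps
    print(name, frame_rate, bitrate)
```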
# B.3. Framework

We use the same framework as described in Section A; the difference is that we do not use text streaming.

# B.4. Three Types of Audio Tokenizers

Following the structure of MimiCodec (Défossez et al., 2024), we train three versions of the audio codec tokenizer, all on $24\mathrm{kHz}$ speech data, as follows:

(V1) We set the down-sampling rates to [2, 5, 6, 8], resulting in a $50\mathrm{Hz}$ frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 1.65 kbps.
(V2) We set the down-sampling rates to [4, 5, 6, 8], resulting in a $25\mathrm{Hz}$ frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 825 bps.
(V3) We set the down-sampling rates to [2, 4, 5, 6, 8], resulting in a $12.5\mathrm{Hz}$ frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 412.5 bps.

These frame rates and bitrates follow directly from the down-sampling factors; see the short calculation after Table 8 above. Note that the original MimiCodec is trained with a distillation loss from WavLM; we do not add this loss during the training of our audio tokenizers. Therefore, these three audio tokenizers do not include any semantic information. Table 8 shows the reconstruction performance of the three audio tokenizers.

# B.5. Semantic Tokenizer

The previous three audio codec tokenizers do not consider semantic information. To evaluate the importance of semantic information, we follow WhisperSpeech to build a Whisper-based semantic tokenizer. Specifically, we follow the training code of WhisperSpeech, using two down-sampling layers to compress the Whisper encoder's features to a $12.5\mathrm{Hz}$ frame rate, and then add three RVQ layers to quantize them. Thus, this semantic tokenizer has the same bitrate as the V3 audio tokenizer.

# B.6. Evaluation metrics

We evaluate the pre-training performance from the following aspects:

Training efficiency: The space complexity of a transformer's attention is $O(T^2)$, where $T$ is the sequence length. A low-bitrate audio tokenizer compresses the audio signal into a shorter token sequence, thereby improving training efficiency. For all experiments, we use the same GPU machine to train the model and record the training duration.

Inference efficiency: Similarly, a low-bitrate audio tokenizer can improve inference efficiency, as it requires fewer inference steps. We use the Real-Time Factor (RTF) to assess inference efficiency. Note that for all experiments, we do not use any inference optimization tricks, such as KV caching.

Validation loss and perplexity: Following text LLMs (OpenAI, 2023), we use validation loss and perplexity to evaluate model performance (a small helper illustrating RTF and perplexity is given after Table 9).

![](images/d94fab77d52195e7058b5d482d8f7f1f1f1533b9c9ca7a8d3dd5363564b5f2ed.jpg)
Figure 5. The performance comparison with different window sizes during inference.

Table 9. The influence of codebook size on reconstruction performance.

| Codebook size | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | STFT loss (↓) | Token utilization (↑) |
| --- | --- | --- | --- | --- | --- | --- |
| 2048 | 2.0 | 3.76 | 3.78 | 0.81 | 1.20 | 100% |
| 1024 | 1.83 | 3.66 | 3.65 | 0.80 | 1.14 | 100% |
| 512 | 1.69 | 3.64 | 3.58 | 0.792 | 1.18 | 100% |
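The efficiency and modeling metrics defined in Appendix B.6 reduce to two simple formulas. The helper below is only an illustrative sketch of how RTF and perplexity are interpreted; it is not the evaluation script, and the numbers in the example call are made up.

```python
import math

def real_time_factor(processing_seconds: float, audio_seconds: float) -> float:
    """RTF < 1 means the model runs faster than real time."""
    return processing_seconds / audio_seconds

def perplexity(mean_token_nll: float) -> float:
    """Perplexity is the exponential of the average next-token negative log-likelihood."""
    return math.exp(mean_token_nll)

# Example (made-up numbers): 0.5 s of compute for 10 s of audio, validation loss 2.0
print(real_time_factor(0.5, 10.0), perplexity(2.0))
```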
# C. Ablation study

# C.1. The influence of window size for ALMTokenizer

As discussed in the previous section, the proposed ALMTokenizer supports a dynamic compression rate by changing the window size $w$. Figure 5 compares reconstruction performance across different window sizes. We observe that a smaller window size yields better reconstruction performance but also increases the bitrate: for example, a window size of 2 corresponds to 1237.5 bps, whereas a window size of 6 corresponds to 412.5 bps (see the short calculation after Table 10). This also highlights an advantage of the proposed method: the frame rate can be changed dynamically at inference time by choosing a different window size.

# C.2. The influence of codebook size

We explore three different codebook sizes: 512, 1024, and 2048. To align with the setting of MimiCodec (Défossez et al., 2024), we set the maximum codebook size to 2048. The results are shown in Table 9. We observe that scaling up the codebook size improves reconstruction performance. Furthermore, we find that almost all tokens are used.

# C.3. The influence of model size for reconstruction performance

To explore the influence of model size on reconstruction performance, we set up two configurations: (1) we use 24 self-attention layers for both the transformer encoder and transformer decoder, resulting in 174M parameters; (2) we use 12 self-attention layers for both, resulting in 87M parameters. In both settings, we keep the Patchify module the same size, as it consists of several convolutional layers and contributes few parameters. The experimental results in Table 10 indicate that a larger model improves reconstruction but also increases computational cost (higher RTF). Previous work, StableCodec (Parker et al., 2024), shows that scaling the codec model to 1B parameters can lead to better performance. Due to computational resource limitations, we leave scaling to larger model sizes for future work.

Table 10. The influence of model size on reconstruction performance.

| Setting | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | Model size (M) (↓) | RTF (↓) |
| --- | --- | --- | --- | --- | --- | --- |
| 24 attention layers | 2.0 | 3.76 | 3.78 | 0.81 | 174 | 0.031 |
| 12 attention layers | 1.87 | 3.57 | 3.70 | 0.79 | 87 | 0.019 |
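As noted in Appendix C.1, the bitrate is determined by the window size $w$: the Patchify module produces 75 frames per second for 24 kHz audio (hop 320, Appendix G.1), each group of $w$ frames is summarized by one query token, and each query token carries 3 RVQ codes of 11 bits. The arithmetic below reproduces the quoted numbers and is a sanity check only, not part of the codebase.

```python
import math

patchify_rate = 24_000 / 320          # 75 frames per second before query compression
n_rvq, bits = 3, math.log2(2048)      # 3 RVQ layers, 11 bits each

for w in [2, 3, 6]:
    query_rate = patchify_rate / w                 # query tokens per second (FPS)
    tokens_per_second = query_rate * n_rvq         # discrete tokens per second (TPS)
    bitrate = query_rate * n_rvq * bits            # bits per second
    print(f"w={w}: FPS={query_rate}, TPS={tokens_per_second}, bitrate={bitrate} bps")
# w=2 -> 1237.5 bps; w=6 -> FPS 12.5, TPS 37.5, 412.5 bps (matches Table 12)
```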
# C.4. The influence of mask rate in the MAE loss

Inspired by MAE (He et al., 2022), we tested three mask-rate ranges: (10-20%), (20-30%), and (30-40%). The results are shown in Table 11, and a sketch of the masking procedure is given after the table. The results indicate that higher mask rates (30-40%) benefit semantics but harm reconstruction, leading us to adopt the intermediate range (20-30%).

Table 11. The influence of mask rate on the MAE loss.

| Mask rate range | UTMOS | DNSMOS | VISQOL | PESQ | STOI | ASR | ER |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 10-20% | 3.77 | 3.62 | 3.80 | 2.0 | 0.81 | 18.7 | 27.7 |
| 20-30% | 3.76 | 3.64 | 3.78 | 2.0 | 0.81 | 18.3 | 29.0 |
| 30-40% | 3.36 | 3.06 | 3.31 | 1.58 | 0.77 | 18.1 | 29.6 |
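As a rough illustration of the masking procedure studied in Appendix C.4, the snippet below samples a mask rate from a range and replaces that fraction of frame embeddings with a mask value. The function name mirrors `apply_mask` in Listing 1, but the exact implementation in our code may differ.

```python
import torch

def apply_mask(frames, mask_rate_range=(0.2, 0.3), mask_token=None):
    """frames: (B, T, D). Randomly replace a sampled fraction of frame embeddings."""
    B, T, D = frames.shape
    rate = torch.empty(1).uniform_(*mask_rate_range).item()      # sample a mask rate
    mask = torch.rand(B, T, device=frames.device) < rate         # True = masked position
    fill = torch.zeros(1, 1, D, device=frames.device) if mask_token is None else mask_token
    return torch.where(mask.unsqueeze(-1), fill, frames)

# Example: mask 20-30% of the 75 frames of a 1-second 24 kHz clip
masked = apply_mask(torch.randn(2, 75, 256))
```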
+ +# D. Evaluation + +We evaluate the performance of the previous SOTA audio tokenizers and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks. + +# D.1. Audio Reconstruction + +For speech data, we use DNS-MOS (Reddy et al., 2022), UT-MOS (Saeki et al., 2022), PESQ, STOI (Short-Time Objective Intelligibility), VISQOL (speech version), and STFT loss as metrics. + +For sound and music data, we use VISQOL (audio version), STFT loss, and Mel loss. Furthermore, following (Kumar et al., 2023), we conduct the MUSHRA subjective test for speech, sound, and music. Specifically, we hire 10 audio-related researchers to conduct the MOS evaluation. We ask the listeners to rate each audio, with scores ranging from 0 to 100. Refer to D.5 for the details. + +Evaluation Datasets: For speech data, we evaluate on a subset of VCTK (Veaux et al., 2017) (200 speech utterances) and a subset of the LibriTTS test clean set (Zen et al., 2019) (400 speech utterances). For sound data, we evaluate on a subset of the AudioCaps validation set (Kim et al., 2019) (200 sound utterances). For music data, we evaluate on a subset of the MusicCaps (Agostinelli et al., 2023) dataset (200 music utterances). + +# D.2. Audio Semantic Information + +Previous SSL models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a), have shown that semantic-rich representations can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Inspired by these works, we propose evaluating the performance of the audio tokenizer for downstream recognition tasks. We use the quantized features of the audio tokenizer as the input for downstream tasks. We follow two popular benchmarks: SUPERB (Yang et al., 2021) and ARCH (La Quatra et al., 2024). + +For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For the ASR task, we train on the LibriSpeech train-100 set and evaluate on the LibriSpeech test clean set. For the EC task, we follow ARCH (La Quatra et al., 2024) to split the training and test sets. + +For sound data, we conduct the sound classification task on the ESC-50 dataset (Piczak, 2015). For music data, we conduct the music classification task on the Medley-Solos-DB dataset (Lostanlen & Cella, 2016). For both tasks, we follow the ARCH benchmarking settings to split the training and test sets. + +For all experiments, we train for 10 epochs with the same learning rate and batch size. For the automatic speech recognition + +task, we use word error rate (WER) as the metric. For the other classification tasks, we use accuracy as the metric. + +# D.3. LM-based Audio Understanding + +Overview To further validate whether the audio tokenizer is suitable for building an audio language model, we propose conducting an audio understanding task using discrete tokens as input. We conduct three tasks: automatic speech recognition (ASR), audio captioning, and music captioning. We use the framework introduced in Section A. For audio data, we use the audio tokenizer to encode it as discrete tokens; for text data, we use the BPE tokenizer of LLAMA 3.2. We construct the sequence as [audio token, text token], then the model is asked to predict the text token based on the previous audio token. 
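To make the sequence layout concrete, the sketch below shows one way to assemble the [audio token, text token] sequences for the understanding tasks and the [text token, audio token] sequences for the generation tasks (Appendix D.4), together with the top-k sampling used at inference. The separator id, vocabulary offset, and helper names are placeholders for illustration, not values from our implementation.

```python
import torch

def build_sequence(audio_tokens, text_tokens, task, audio_offset=128_256, sep_id=128_000):
    """Concatenate modality streams for LM training.

    audio_tokens / text_tokens are 1-D LongTensors; audio ids are shifted by
    `audio_offset` so they do not collide with the BPE text vocabulary (placeholder value).
    """
    audio = audio_tokens + audio_offset
    if task == "understanding":        # ASR, audio captioning, music captioning
        seq = torch.cat([audio, torch.tensor([sep_id]), text_tokens])
        target_start = len(audio) + 1  # loss is computed on the text part only
    elif task == "generation":         # TTS, text-to-sound, text-to-music
        seq = torch.cat([text_tokens, torch.tensor([sep_id]), audio])
        target_start = len(text_tokens) + 1
    else:
        raise ValueError(task)
    return seq, target_start

def top_k_sample(logits, k=30):
    """Top-k sampling as used at inference time (k = 30 in all experiments)."""
    values, indices = torch.topk(logits, k)
    probs = torch.softmax(values, dim=-1)
    return indices[torch.multinomial(probs, 1)]
```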
Training Data For the ASR task, we select 2,000 hours of LibriHeavy speech data (Kang et al., 2024). For the audio captioning task, we use AudioCaps (Kim et al., 2019) and BBC sound effects (Mei et al., 2023). For the BBC sound effects, we keep only the first 10 seconds of audio when an utterance is longer than 10 seconds, which yields about 500 hours of sound data. For the music captioning task, we use a subset of the Million Song dataset, keeping the first 10 seconds of each utterance, which results in about 500 hours of music data. For the corresponding captions, we use LPMusicCaps (Doh et al., 2023).

Test Data For the ASR task, we evaluate on the LibriSpeech test-clean set. For the audio captioning task, we evaluate on the AudioCaps dataset (Kim et al., 2019). For the music captioning task, we evaluate on the MusicCaps dataset (Agostinelli et al., 2023).

Metrics Similarly, we use WER as the evaluation metric for the ASR task. For audio and music captioning, we follow (Drossos et al., 2020) and adopt the BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDEr metrics.

Inference Setting For inference, we use the top-k sampling strategy and set $k = 30$ for all experiments.

# D.4. LM-based Audio Generation

We also perform audio generation tasks, including text-to-speech, text-to-sound, and text-to-music generation. Similarly, we construct the sequence as [text token, audio token]; the model is then asked to predict the audio tokens based on the preceding text tokens.

Training and Test Data We use the same training and test data as the audio understanding task.

Metrics For TTS evaluation, we use WER to evaluate robustness, and UTMOS and DNSMOS to assess speech quality. For text-to-sound and text-to-music, we follow previous work (AudioGen; Kreuk et al., 2022) and use Fréchet Audio Distance (FAD), Kullback-Leibler (KL) Divergence, and Fréchet Distance (FD) for audio fidelity and similarity.

Inference Setting During the inference stage, we use the top-k sampling strategy and set $k = 30$ for all experiments.

# D.5. Subjective Evaluations

For the subjective evaluations, we adopt the approach used in previous works (Kumar et al., 2023; Parker et al., 2024) and use the MUSHRA format without a hidden anchor. Listeners are asked to compare multiple versions of an example simultaneously, including both a labeled reference and a hidden reference. They are given the following instructions: "Please assess the quality similarity between an audio sample and its reference. Listen carefully to the reference audio, then rate the quality of each test clip in comparison. A score of 0 indicates no resemblance to the reference, while a score of 100 means it is identical to the reference." We randomly select 10 samples from each category (speech, music, and sound) in the test set, ensuring that each sample receives 10 ratings.

# E. Audio Tokenizer Baselines

To make a fair comparison, we classify the audio tokenizers into two types: (1) speech-based tokenizers, which are trained on speech datasets, and (2) audio-based tokenizers, which are trained on speech, sound, and music datasets.

# E.1. Speech Tokenizer

For speech data, we compare with the following models:

Table 12. The performance comparison on the LibriTTS test-clean set. Bold for the best result and underline for the second-best result.

| Models | FPS/TPS | CS/BR | UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | Model size (M) (↓) | RTF (↓) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Encodec | 50/400 | 1024/6kbps | 3.30 | 3.76 | 3.95 | 0.94 | 2.72 | 14 | 0.019 |
| Encodec | 50/150 | 1024/1.5kbps | 2.02 | 3.27 | 3.83 | 0.88 | 1.79 | 14 | 0.019 |
| DAC | 50/150 | 1024/1.5kbps | 2.61 | 3.36 | 3.85 | 0.89 | 1.96 | 71 | 0.026 |
| WavTokenizer | 40/40 | 4096/0.48kbps | 3.65 | 3.61 | 3.80 | 0.87 | 1.81 | 77 | 0.017 |
| StableCodec | 25/25 | 46656/0.4kbps | 4.20 | 3.74 | 3.51 | 0.88 | 1.85 | 950 | 0.039 |
| MimiCodec (3 RVQ) | 12.5/37.5 | 2048/0.41kbps | 2.82 | 3.28 | 3.34 | 0.83 | 1.40 | 75.6 | 0.023 |
| ALMTokenizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.68 | 3.64 | 3.90 | 0.90 | 1.92 | 174 | 0.031 |
+ +(1) Encodec (Defossez et al., 2022), a SOTA audio codec model trained on large-scale speech, sound, and music datasets. The official open-sourced $24\mathrm{kHz}$ version is used. +(2) DAC-Codec (Kumar et al., 2023), which offers very high reconstruction performance. It is trained on large-scale speech, sound, and music datasets. The official open-sourced $24\mathrm{kHz}$ version is used. +(3) MimiCodec (Défossez et al., 2024), a SOTA low-bitrate speech codec model trained on a large-scale speech dataset. The sampling rate is $24\mathrm{kHz}$ . +(4) SpeechTokenizer (Zhang et al., 2023), a semantic-rich speech codec model trained on a large-scale speech dataset. The sampling rate is $16\mathrm{kHz}$ . +(5) WavTokenizer (Ji et al., 2024), an audio codec tokenizer trained on large-scale speech, sound, and music datasets. The sampling rate is $24\mathrm{kHz}$ . + +To make a fair comparison, for Encodec, DAC-Codec, and SpeechTokenizer, we use the first three RVQ layers to control the bitrate during inference. + +# E.2. Audio Tokenizer + +For sound and music data, we compare with Encodec, DAC-Codec, and WavTokenizer. These three models are trained on large-scale speech, sound, and music datasets. + +# E.3. Semantic Models + +Furthermore, to evaluate the performance of semantic information, we also introduce several SSL-based models. For speech, we use WavLM (Chen et al., 2022a) and HuBERT (Hsu et al., 2021). For sound and music, we use BEATs (Chen et al., 2022b) and Wav2Vec2-AudioSet $^{6}$ . + +# F. More audio tokenizer evaluation experiments + +# F.1. The subjective evaluation for audio tokenizer + +Table 7 shows the subjective evaluation results for audio tokenizer. + +# F.2. Evaluation results on LibriTTS test clean + +We report the reconstruction performance evaluated on a subset of the LibriTTS test clean set, where we randomly select 400 speech utterances. Additionally, we calculate the Real-Time Factor (RTF) and model size to assess efficiency. For RTF evaluation, we use an NVIDIA A100 GPU to evaluate all models. + +# F.3. Length generalization + +StableCodec (Parker et al., 2024) highlights that the introduction of transformer-based architectures can lead to the length generalization problem. For instance, the training data of ALMTokenizer consists of 5-second segments, whereas the test + +Table 13. Objective metrics for the ALMTokenizer and baselines, evaluated on utterances from length 4s to 10s, showing generalization of models across lengths + +

| Model | FPS | TPS | Bitrate | PESQ (↑) | UT-MOS (↑) | VISQOL (↑) | STOI (↑) | DNSMOS (↑) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 4 seconds | | | | | | | | |
| Encodec | 50 | 150 | 1.5kbps | 1.97 | 2.64 | 3.62 | 0.80 | 3.26 |
| DAC | 50 | 150 | 1.5kbps | 2.1 | 3.17 | 3.65 | 0.81 | 3.26 |
| Ours | 12.5 | 37.5 | 0.41kbps | 1.84 | 3.63 | 3.69 | 0.79 | 3.41 |
| 6 seconds | | | | | | | | |
| Encodec | 50 | 150 | 1.5kbps | 1.97 | 2.54 | 3.63 | 0.81 | 3.26 |
| DAC | 50 | 150 | 1.5kbps | 2.0 | 3.11 | 3.65 | 0.81 | 3.28 |
| Ours | 12.5 | 37.5 | 0.41kbps | 1.89 | 3.66 | 3.75 | 0.81 | 3.62 |
| 8 seconds | | | | | | | | |
| Encodec | 50 | 150 | 1.5kbps | 1.96 | 2.52 | 3.63 | 0.81 | 3.34 |
| DAC | 50 | 150 | 1.5kbps | 2.1 | 3.18 | 3.66 | 0.81 | 3.28 |
| Ours | 12.5 | 37.5 | 0.41kbps | 1.95 | 3.55 | 3.74 | 0.81 | 3.66 |
| 10 seconds | | | | | | | | |
| Encodec | 50 | 150 | 1.5kbps | 1.95 | 2.53 | 3.65 | 0.81 | 3.32 |
| DAC | 50 | 150 | 1.5kbps | 2.1 | 2.19 | 3.67 | 0.81 | 3.25 |
| Ours | 12.5 | 37.5 | 0.41kbps | 1.96 | 3.54 | 3.73 | 0.81 | 3.66 |
data comprises segments of varying durations. We evaluate the model across four distinct length levels: 4, 6, 8, and 10 seconds. Encodec and DAC are selected as baselines due to their reliance on convolutional layers, which are robust to variable input lengths. As shown in Table 13, the evaluation results indicate that ALMTokenizer effectively handles inference across these diverse lengths. These findings suggest that ALMTokenizer exhibits strong generalization with respect to input length variation.

# F.4. Compared to diffusion-based audio codec models

We compare ALMTokenizer with an alternative family of audio tokenizers that leverage discrete semantic tokens derived from self-supervised pre-trained (SSL) models (e.g., Hubert (Hsu et al., 2021), WavLM (Chen et al., 2022a), AudioMAE (Huang et al., 2022)). These models first quantize the SSL features into semantic tokens and subsequently use a generative model to resynthesize the waveform; diffusion (Ho et al., 2020) and flow matching (Lipman et al., 2022) are two popular choices. Previous works, such as the GLM4-Voice tokenizer (Zeng et al., 2024) and SemantiCodec (Liu et al., 2024), have demonstrated success using diffusion-based decoders. However, such strategies tend to result in significant information loss. For instance, the semantic tokens in GLM4-Voice lack timbre information and require additional prompts to control timbre during decoding. Notably, the open-sourced GLM4-Voice tokenizer uses a fixed timbre, meaning that any speech encoded by GLM4-Voice will lose its original timbre. To address this information loss in semantic tokens, SemantiCodec introduces an acoustic stream to enhance waveform reconstruction. A key concern, however, is that both the SemantiCodec and GLM4-Voice tokenizers demand significantly more computational resources during the inference stage. In the following, we present a comprehensive comparison between ALMTokenizer and SemantiCodec, focusing on the following aspects: (1) reconstruction performance for speech, sound, and music; (2) semantic information performance for speech, sound, and music; and (3) computational resource requirements during inference, measured using RTF.

Table 14 shows the speech reconstruction and semantic performance, where we observe that ALMTokenizer outperforms the alternatives in both aspects at a lower bitrate. Table 15 presents experimental results for sound and music data, where ALMTokenizer again demonstrates superior performance across all metrics compared to SemantiCodec. In Table 16, we present the model size and RTF metrics, showing that ALMTokenizer has fewer model parameters and significantly surpasses SemantiCodec in inference speed (0.031 vs 0.92).

# G. The details of ALMTokenizer structure and training

# G.1. Model structure

Table 17 gives the details of the ALMTokenizer configuration, which results in 174M parameters. In all experiments, we adopt 8 transformer layers for both the MAE-transformer encoder and decoder.

Table 14. The performance comparison between ALMTokenizer and SemantiCodec on the VCTK dataset.

| Models | FPS/TPS | CS/BR | UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | ASR (↓) | EC (↑) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| SemantiCodec | 50/50 | 16384/0.68kbps | 3.2 | 3.57 | 3.90 | 0.81 | 1.76 | 48.3 | 17.8 |
| ALMTokenizer | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0 |
Table 15. The performance comparison between ALMTokenizer and SemantiCodec on music (MusicCaps) and sound data (AudioCaps).

| Models | FPS/TPS | CS/BR | Mel loss (↓) | STFT loss (↓) | VISQOL (↑) | Classification (↑) |
| --- | --- | --- | --- | --- | --- | --- |
| Sound data | | | | | | |
| SemantiCodec | 50/50 | 16384/0.68kbps | 18.45 | 1.40 | 2.47 | 38.8% |
| ALMTokenizer | 12.5/37.5 | 2048/0.41kbps | 15.0 | 1.24 | 2.99 | 44% |
| Music data | | | | | | |
| SemantiCodec | 50/50 | 16384/0.68kbps | 47.9 | 1.58 | 2.49 | 48% |
| ALMTokenizer | 12.5/37.5 | 2048/0.41kbps | 34.4 | 1.32 | 3.96 | 59% |
Patchify and UnPatchify modules A single-channel audio signal $\pmb{x} \in \mathcal{R}^{1 \times N}$ (where $N$ denotes the number of sampling points) is processed through the Encodec-style Patchify and UnPatchify modules, which adopt the same structure as Encodec (Défossez et al., 2022), consisting of four convolutional blocks. Each convolutional block consists of a residual unit followed by a down-sampling layer. These convolution blocks encode the audio signal $\pmb{x}$ into an audio frame representation $e \in \mathcal{R}^{T \times d}$, where $T$ denotes the number of frames and $d$ denotes the dimension of each vector. The convolution blocks are followed by a two-layer LSTM for sequence modeling and a final 1D convolutional layer with a kernel size of 7 and $D$ output channels. The UnPatchify module mirrors the Patchify architecture by substituting stride convolutions with transposed convolutions and reversing the stride order.

For the StableCodec-style Patchify and UnPatchify modules, we follow the approach in StableCodec (Parker et al., 2024) and use a reshape operation to transform $\pmb{x}$ into $e \in \mathcal{R}^{T \times d}$, where $T = N / 320$ and $d = 320$. We then apply a linear layer to map the dimension to $D$. Finally, we add four transformer layers for sequence modeling. Similarly, the UnPatchify module mirrors the Patchify architecture.

Discriminators For the discriminators, we follow prior work (Défossez et al., 2022), which combines mel-spectrogram and log-mel-spectrogram features and inputs them into a network consisting of several convolutional layers. Specifically, we use six discriminators with different configurations: the hidden dimensions are set to 64, 128, 256, 512, 512, 512, and the hop lengths are set to 32, 64, 128, 256, 512, 1024.

# G.2. Reconstruction loss and adversarial loss for ALMTokenizer

Let the reconstructed signal be $\hat{\pmb{x}}$. We design the reconstruction loss from two perspectives: the time domain and the frequency domain. We first compute the $L_{1}$ loss between $\pmb{x}$ and $\hat{\pmb{x}}$ in the time domain. Next, we compute the $L_{1}$ loss between the STFT spectrograms of $\pmb{x}$ and $\hat{\pmb{x}}$ in the frequency domain. Following (Wang et al., 2024b), we employ a sub-band split strategy to divide the spectrogram into several parts. The adversarial loss is employed to enhance the perceptual quality of the generated audio (a PyTorch sketch of Equations (3) and (4) is given after Table 16):

$$
\mathcal{L}_{d} = \frac{1}{K} \sum_{k=1}^{K} \max(0, 1 - D_{k}(\pmb{x})) + \max(0, 1 + D_{k}(\hat{\pmb{x}})) \tag{3}
$$

where $K$ denotes the number of discriminators. During the training stage, the adversarial loss for the generator is computed as a hinge loss over the logits of these discriminators:

$$
\mathcal{L}_{adv} = \frac{1}{K} \sum_{k=1}^{K} \max(0, 1 - D_{k}(\hat{\pmb{x}})) \tag{4}
$$

The feature loss $\mathcal{L}_{feat}$ is computed as the average absolute difference between the discriminator's internal layer outputs for the generated audio and those for the corresponding real audio.

Table 16. The model size and RTF comparison between SemantiCodec and ALMTokenizer.

| Model | Model size (M) (↓) | RTF (↓) |
| --- | --- | --- |
| SemantiCodec | 507 | 0.92 |
| ALMTokenizer (Ours) | 174 | 0.031 |
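A direct PyTorch transcription of Equations (3) and (4) is sketched below, assuming `discriminators` is the list of $K$ spectrogram discriminators described in Appendix G.1; it illustrates the loss terms only and is not the full training loop.

```python
import torch

def discriminator_loss(discriminators, x, x_hat):
    """Hinge loss for the K discriminators, Eq. (3); max(0, z) is written as relu(z)."""
    loss = 0.0
    for d in discriminators:
        loss = loss + torch.relu(1.0 - d(x)).mean() \
                    + torch.relu(1.0 + d(x_hat.detach())).mean()
    return loss / len(discriminators)

def generator_adv_loss(discriminators, x_hat):
    """Hinge adversarial loss for the generator, Eq. (4)."""
    loss = 0.0
    for d in discriminators:
        loss = loss + torch.relu(1.0 - d(x_hat)).mean()
    return loss / len(discriminators)
```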
+ +

| ALMTokenizer | |
| --- | --- |
| Input shape | (B, 1, N) |
| Patchify module (output) | (B, T, d), T = N/320 |
| Token Interleaving and Retrieval | w ∈ [2, 3, 4, 5, 6, 7, 8, 9, 10] |
| Dimension of transformer encoder | 256 |
| Number of transformer encoder layers | 24 |
| Dimension of transformer decoder | 512 |
| Number of transformer decoder layers | 24 |
| Codebook size | 2048 |
| VQ layers | 3 |
| Number of Transformer heads | 64 |
| UnPatchify module (output) | (B, 1, N) |
Table 17. ALMTokenizer model backbone configurations

# G.3. Training details

The AdamW optimizer is used for training. We set the learning rate to $1e-4$ and train the model for 200k steps. The final loss is shown below; we set $\lambda_{1} = 0.5$ and $\lambda_{2} = 0.1$ in our experiments. We conduct all experiments on 4 NVIDIA A100-80G GPUs.

$$
\mathcal{L} = \mathcal{L}_{\text{adv}} + \mathcal{L}_{\text{feat}} + \mathcal{L}_{\text{rec}} + \lambda_{1} \mathcal{L}_{\text{MAE}} + \lambda_{2} \mathcal{L}_{\text{AR}} \tag{5}
$$

# H. Reproducibility Statement

To enhance reproducibility, we provide the pseudocode of ALMTokenizer below. In the future, we plan to improve both the model structure and training data to obtain more robust models, especially for music and sound, and to release the code for the research community.

Listing 1. Pseudocode of ALMTokenizer
```python
import torch
import torch.nn as nn
from random import choice
# Transformer, EncodecEncoder, EncodecDecoder, RVQ_semantic, GPTDecoder and L1_loss
# are defined elsewhere in the codebase.

class ALMTokenizer(nn.Module):
    def __init__(self, transformerEncoder_args, transformerDecoder_args,
                 maeDecoder_args, depth_gpt_args, patchify_args,
                 encoder_embedding_dim, decoder_embedding_dim,
                 semantic_prior_path, mask_rate,
                 window_sizes=[2, 3, 4, 5, 6, 7, 8, 9, 10]):
        super().__init__()
        self.window_sizes = window_sizes
        self.transformerEncoder = Transformer(transformerEncoder_args)
        self.transformerDecoder = Transformer(transformerDecoder_args)
        self.maedecoder = Transformer(maeDecoder_args)
        self.Patchify = EncodecEncoder(patchify_args)
        self.UnPatchify = EncodecDecoder(patchify_args)
        # learnable [CLS] query token and masked-frame token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, encoder_embedding_dim))
        self.masked_token = nn.Parameter(torch.zeros(1, 1, decoder_embedding_dim))
        # RVQ layers initialized with semantic priors (k-means cluster centers)
        checkpoint = torch.load(semantic_prior_path, map_location="cpu")
        self.vq = RVQ_semantic(input_dim=encoder_embedding_dim,
                               semantic_prior=checkpoint,
                               layers=3)
        self.depth_gpt = GPTDecoder(depth_gpt_args)
        self.tmp_window_size = 6
        self.mask_rate = mask_rate

    def Encoder_token_Interleaving(self, x):
        # insert one [CLS] query token after every window of w frames
        B, T, D = x.shape  # batch, length, dim
        cls_tokens = self.cls_token.repeat(B, T // self.tmp_window_size, 1).unsqueeze(2)
        x_reshaped = x.reshape(B, T // self.tmp_window_size, self.tmp_window_size, D)
        x_with_cls = torch.cat([x_reshaped, cls_tokens], dim=2)
        new_x = x_with_cls.reshape(B, -1, D)
        return new_x

    def Encoder_token_Retrieval(self, x):
        # gather the [CLS] query tokens from the interleaved sequence
        B, new_T, D = x.shape
        original_T = new_T - new_T // (self.tmp_window_size + 1)
        cls_indices = [(i + 1) * (self.tmp_window_size + 1) - 1
                       for i in range(original_T // self.tmp_window_size)]
        cls_tokens = x[:, cls_indices, :]
        return cls_tokens

    def Decoder_token_Interleaving(self, en_token):
        # interleave quantized query tokens with learnable masked tokens
        B, T, D = en_token.shape
        x = self.masked_token.repeat(B, 1, 1)
        x = x.repeat(1, en_token.shape[1] * self.tmp_window_size, 1)
        x = x.reshape(B, -1, self.tmp_window_size, D)
        x_with_masks = torch.cat([x, en_token.unsqueeze(2)], dim=2)
        new_x = x_with_masks.reshape(B, -1, D)
        return new_x

    def Decoder_token_Retrieval(self, new_x):
        # keep the reconstructed frame positions and drop the query positions
        B, new_T, D = new_x.shape
        num_masks = new_T // (self.tmp_window_size + 1)
        query_indices = [(i + 1) * (self.tmp_window_size + 1) - 1 for i in range(num_masks)]
        frame_indices = [i for i in range(new_T) if i not in query_indices]
        mask_frames = new_x[:, frame_indices, :]
        return mask_frames

    def forward(self, x):
        x_len = x.shape[-1]
        self.tmp_window_size = choice(self.window_sizes)  # randomly sample a window size
        emb_frames = self.Patchify(x)
        if self.training:
            emb_frames_mask = self.apply_mask(emb_frames, mask_rate=self.mask_rate)
        else:
            emb_frames_mask = emb_frames
        interleaving_frames = self.Encoder_token_Interleaving(emb_frames_mask)
        predict_dsp = self.maedecoder(interleaving_frames)
        mae_loss = L1_loss(predict_dsp, emb_frames)               # MAE reconstruction loss
        latent_tokens = self.transformerEncoder(interleaving_frames)
        query_token = self.Encoder_token_Retrieval(latent_tokens)
        quantized_token, codes, all_quantized = self.vq(query_token)
        cat_quantized = []
        for q_emb in all_quantized:
            q_emb = q_emb.reshape(-1, q_emb.shape[-1]).unsqueeze(1)
            cat_quantized.append(q_emb)
        cat_quantized = torch.cat(cat_quantized, dim=1)
        gpt_loss = self.depth_gpt.compute_prior_loss(cat_quantized)  # AR prediction loss
        de_interleaving_frames = self.Decoder_token_Interleaving(quantized_token)
        de_latent_token = self.transformerDecoder(de_interleaving_frames)
        mask_tokens = self.Decoder_token_Retrieval(de_latent_token)
        x_ = self.UnPatchify(mask_tokens)
        return x_, mae_loss, gpt_loss
```

# I. Limitation

In this study, we present ALMTokenizer, a low-bitrate, semantic-rich audio codec tokenizer. We demonstrate that ALMTokenizer excels in both reconstruction and semantic information retention under low-bitrate conditions. However, we acknowledge that there is still significant room for improvement in reconstruction performance, particularly for sound and music data. Building an audio tokenizer for sound and music in the low-bitrate setting poses additional challenges. In terms of semantic information, ALMTokenizer still lags behind traditional SSL models. Although we propose several training losses to enhance semantic information in the codec model, the improvements are limited and, in some cases, negatively impact reconstruction quality. We recognize the need for careful design and balancing of these semantic loss terms. Additionally, the multi-stage training strategy increases training complexity and introduces some waste: most of the auxiliary components, e.g., the MAE-transformer encoder/decoder, the MAE decoder, and the depth AR-transformer, are eventually discarded. It would make sense to reuse these components for some purpose; for example, the AR decoder could be used to initialize the depth transformer in the language modeling task. These concerns are left for future work.
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10344/images/082256a1f928b9b771e0604046246e9c0a0de124d1e9ec2d048c5dcfe42c390e.jpg b/data/2025/2504_10xxx/2504.10344/images/082256a1f928b9b771e0604046246e9c0a0de124d1e9ec2d048c5dcfe42c390e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3317c59621f5bf29e54f61968380b09a7548ec93 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/082256a1f928b9b771e0604046246e9c0a0de124d1e9ec2d048c5dcfe42c390e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0754eec56449ba109f933db939b2bf4a2a53559c76ccbfbe72ae2297afffe0e4 +size 94717 diff --git a/data/2025/2504_10xxx/2504.10344/images/105b727952b5ca74e95cd96f0079e5c39919e423b44aa967df89a083ef0b942e.jpg b/data/2025/2504_10xxx/2504.10344/images/105b727952b5ca74e95cd96f0079e5c39919e423b44aa967df89a083ef0b942e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..794b399375ec450d1c811ee14a4670c01c71bc3d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/105b727952b5ca74e95cd96f0079e5c39919e423b44aa967df89a083ef0b942e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c11cabd194478a6861e180914d3d1753ddc9e4b9326786b3bfd3426259929c8d +size 33434 diff --git a/data/2025/2504_10xxx/2504.10344/images/12cb13002e9cb2cd2e016f4eedbe2301e9a8b67a8aa802b90f2eea6edb26fbcc.jpg b/data/2025/2504_10xxx/2504.10344/images/12cb13002e9cb2cd2e016f4eedbe2301e9a8b67a8aa802b90f2eea6edb26fbcc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8e41a30c50694e2d6c8e70ec08c199c9cd536c9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/12cb13002e9cb2cd2e016f4eedbe2301e9a8b67a8aa802b90f2eea6edb26fbcc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d88fcf1d369ab3f6483e4e0403d37c60e7e9dcf5eda2414a8b9b4d4ce675441 +size 5854 diff --git a/data/2025/2504_10xxx/2504.10344/images/13fe390d7e322759f0fba09a33c1158fc9542e63e8aee9389f8bf854e645d46b.jpg b/data/2025/2504_10xxx/2504.10344/images/13fe390d7e322759f0fba09a33c1158fc9542e63e8aee9389f8bf854e645d46b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32d05e897a064d988dc00598f92e7cccf02df709 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/13fe390d7e322759f0fba09a33c1158fc9542e63e8aee9389f8bf854e645d46b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a7344fb5177b743744cae9460984bf3274f7f6cf0035c65346f096678d70498 +size 33021 diff --git a/data/2025/2504_10xxx/2504.10344/images/17f3ab99fd1a09e4e9beee635e4fd0043664ff45bf148bb33396b5a7d9c15ef7.jpg b/data/2025/2504_10xxx/2504.10344/images/17f3ab99fd1a09e4e9beee635e4fd0043664ff45bf148bb33396b5a7d9c15ef7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d22a8d90298fa83bbe03789c97cc5a843e4d95fb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/17f3ab99fd1a09e4e9beee635e4fd0043664ff45bf148bb33396b5a7d9c15ef7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd3fafe987f476fa81d1adcab187b36551fe11fb1d1cc9672149c11d14c9f7c3 +size 29207 diff --git a/data/2025/2504_10xxx/2504.10344/images/1867ab2fb144b56a586a497203f019a503a17af3cdbe1769e511357e997096e0.jpg b/data/2025/2504_10xxx/2504.10344/images/1867ab2fb144b56a586a497203f019a503a17af3cdbe1769e511357e997096e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00d1ad8d6cd0d7ea9d9f44d79bc3edb0b40d1e0a --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10344/images/1867ab2fb144b56a586a497203f019a503a17af3cdbe1769e511357e997096e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9a6d1df771478eb57b22db8b4734610e4868da3e56205b6b4eecd80de39935b +size 90698 diff --git a/data/2025/2504_10xxx/2504.10344/images/1c1b8a8c84d828412e41619aab208a4b72ed13470a39bdd8ec78692ea2ef9051.jpg b/data/2025/2504_10xxx/2504.10344/images/1c1b8a8c84d828412e41619aab208a4b72ed13470a39bdd8ec78692ea2ef9051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a177df71c8f8d01c66ec3071062bdae7f367f1c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/1c1b8a8c84d828412e41619aab208a4b72ed13470a39bdd8ec78692ea2ef9051.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc0aa3f04f46e26da7171c6b665169b0cad51a1879fbb8c60386b3c2818d685a +size 30901 diff --git a/data/2025/2504_10xxx/2504.10344/images/1f2afdd26c38ad12bec4637ffdc1de7b03af211f664c49636beea44b22135499.jpg b/data/2025/2504_10xxx/2504.10344/images/1f2afdd26c38ad12bec4637ffdc1de7b03af211f664c49636beea44b22135499.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc801373434a20673a673792d59df3f709d85de9 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/1f2afdd26c38ad12bec4637ffdc1de7b03af211f664c49636beea44b22135499.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4a6b2fb7046428423ac8371a13242e0b26ab07ebbe97ba187e75a2859ae0f5e +size 38636 diff --git a/data/2025/2504_10xxx/2504.10344/images/35e03b6e4c5c45765bd47a9c8f63f9226b39547c700af3edf772945e2d8a0229.jpg b/data/2025/2504_10xxx/2504.10344/images/35e03b6e4c5c45765bd47a9c8f63f9226b39547c700af3edf772945e2d8a0229.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9db4fc59a9908d61e7289e38869b9ec275d0847e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/35e03b6e4c5c45765bd47a9c8f63f9226b39547c700af3edf772945e2d8a0229.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0768968a3a00956a5be9ddee5728d322c9bcddf525f95fe2ec68719c49ed7d3c +size 6421 diff --git a/data/2025/2504_10xxx/2504.10344/images/36bf7ac0e5ab73bda6d7ca539bf65a78caa3f38942f1291ac6626ec8bb140cad.jpg b/data/2025/2504_10xxx/2504.10344/images/36bf7ac0e5ab73bda6d7ca539bf65a78caa3f38942f1291ac6626ec8bb140cad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acac7687a0a6aea413e2569baed53ba8aea375d4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/36bf7ac0e5ab73bda6d7ca539bf65a78caa3f38942f1291ac6626ec8bb140cad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7228c84457b3c88ea51fb1221d667071c61c73572e16548eab337669ffbfeba +size 9502 diff --git a/data/2025/2504_10xxx/2504.10344/images/3c483e95f4e46d9b474b355afbc8f55a9279df8ed331b640ead0025710bc583e.jpg b/data/2025/2504_10xxx/2504.10344/images/3c483e95f4e46d9b474b355afbc8f55a9279df8ed331b640ead0025710bc583e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cfde5d0e79e4eb9384de63fbb9cbe7275cbdef1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/3c483e95f4e46d9b474b355afbc8f55a9279df8ed331b640ead0025710bc583e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1461dd61432a090d886eb40a23e79cfbf9bd52f2be5954d0b8bb29a04522c0d +size 84413 diff --git a/data/2025/2504_10xxx/2504.10344/images/3c618b74f688548add74bec89c6981564b20b5fb43a06e3d3e40ad5bc0570a5f.jpg 
b/data/2025/2504_10xxx/2504.10344/images/3c618b74f688548add74bec89c6981564b20b5fb43a06e3d3e40ad5bc0570a5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b1abe2130ff5f22f82c03abc8e9d4ac5f3cd95b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/3c618b74f688548add74bec89c6981564b20b5fb43a06e3d3e40ad5bc0570a5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa96c159a8c7100a3d74c081503f009ac0b713228c91301eae2951754f4edd23 +size 14238 diff --git a/data/2025/2504_10xxx/2504.10344/images/65e54bea32de06f51d9c5ce9da1e1a2189806386fd5b23c8a2f36505d83d130b.jpg b/data/2025/2504_10xxx/2504.10344/images/65e54bea32de06f51d9c5ce9da1e1a2189806386fd5b23c8a2f36505d83d130b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6317e49f7bd65cf4366a8207c4e5e28d680e578e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/65e54bea32de06f51d9c5ce9da1e1a2189806386fd5b23c8a2f36505d83d130b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f1c3aeac5d305f09f6a2db47feac3dc6db6514b4ddefb5a1826eb56bdc954d2 +size 26679 diff --git a/data/2025/2504_10xxx/2504.10344/images/6c46a09a51c2d1af91d3429bf4ad54706550bf382847ad3d6090b3a56ed70075.jpg b/data/2025/2504_10xxx/2504.10344/images/6c46a09a51c2d1af91d3429bf4ad54706550bf382847ad3d6090b3a56ed70075.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98ab527de7ab6eb903c21e7272b4fa91caca88bf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/6c46a09a51c2d1af91d3429bf4ad54706550bf382847ad3d6090b3a56ed70075.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:680899a747a3704eeda3d3e1f590b2cdbd707a126e09e3d87680dd91159813e8 +size 39687 diff --git a/data/2025/2504_10xxx/2504.10344/images/6d44cde2f64ff7b03fe3f9a37cca4f2bb251ac87eae5b10bc5cf0924c1174a82.jpg b/data/2025/2504_10xxx/2504.10344/images/6d44cde2f64ff7b03fe3f9a37cca4f2bb251ac87eae5b10bc5cf0924c1174a82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63d7115979cb840ecee1013226a3445380eba096 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/6d44cde2f64ff7b03fe3f9a37cca4f2bb251ac87eae5b10bc5cf0924c1174a82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a2d2f0133653033085c55dde51f1ad39e8fa93c5bc6bd26d5abdc83f1f34dd7 +size 55764 diff --git a/data/2025/2504_10xxx/2504.10344/images/7eb3af90218801d9df7252db544ed8ebf2c58ddeb96adbbaf15ad379992732a6.jpg b/data/2025/2504_10xxx/2504.10344/images/7eb3af90218801d9df7252db544ed8ebf2c58ddeb96adbbaf15ad379992732a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba2cad8c2c97639ccb94e0780687e8ca42adcbab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/7eb3af90218801d9df7252db544ed8ebf2c58ddeb96adbbaf15ad379992732a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7572bc7c1c0dc23d5b1838b6c44d05fdd3506a3ffe4f42b3da56df335196795e +size 14065 diff --git a/data/2025/2504_10xxx/2504.10344/images/847af1e78f02e93f03fa836136aadb7e2bfa7fe3f787b38cbddd0915c8294463.jpg b/data/2025/2504_10xxx/2504.10344/images/847af1e78f02e93f03fa836136aadb7e2bfa7fe3f787b38cbddd0915c8294463.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0fbd7cb12425cf8ac3fae2ef486a94ba9d503e7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/847af1e78f02e93f03fa836136aadb7e2bfa7fe3f787b38cbddd0915c8294463.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:62726e18f20fd4a6550883b735b12b0b3b99c0f8580b9ad444c985c70d082d40 +size 65759 diff --git a/data/2025/2504_10xxx/2504.10344/images/929236d9a847bdafe944104601209dbf98eadb1d765b6edf85d4aa3a2145a540.jpg b/data/2025/2504_10xxx/2504.10344/images/929236d9a847bdafe944104601209dbf98eadb1d765b6edf85d4aa3a2145a540.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9161b713ee6095cbca8ee074a489d38498d3cce8 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/929236d9a847bdafe944104601209dbf98eadb1d765b6edf85d4aa3a2145a540.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4743ede40d613f0695fa726ea2af6e0f7ec8922d5bd31027bd9ece4ff3ec98c5 +size 50302 diff --git a/data/2025/2504_10xxx/2504.10344/images/9f4ec6d1eb51ddf04ea710cf886db9d3b82fb21f6a7d4b9c84f522637ec04adb.jpg b/data/2025/2504_10xxx/2504.10344/images/9f4ec6d1eb51ddf04ea710cf886db9d3b82fb21f6a7d4b9c84f522637ec04adb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e7a3e97cdf4b2c054f90910ede31de414f7ff0e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/9f4ec6d1eb51ddf04ea710cf886db9d3b82fb21f6a7d4b9c84f522637ec04adb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:081a1b92f602b86a111b3d801e273af763179348d0d045908869749203b73549 +size 53721 diff --git a/data/2025/2504_10xxx/2504.10344/images/b85bef3806853a9e62f3afbd2d853354b00e87704dab5446ce7e4c2bfb7a5fe5.jpg b/data/2025/2504_10xxx/2504.10344/images/b85bef3806853a9e62f3afbd2d853354b00e87704dab5446ce7e4c2bfb7a5fe5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5717d48696eb739802533bb43758937ba00ae97 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/b85bef3806853a9e62f3afbd2d853354b00e87704dab5446ce7e4c2bfb7a5fe5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eefb8333d017cdf0ccd98d861982f211c1c2a837e687c9d2d35df99fb4d7239 +size 24098 diff --git a/data/2025/2504_10xxx/2504.10344/images/bc38bc37fde7e41eb41f137a017389aa04332f4882f0f1624d7d8a673e5c16c8.jpg b/data/2025/2504_10xxx/2504.10344/images/bc38bc37fde7e41eb41f137a017389aa04332f4882f0f1624d7d8a673e5c16c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e9957dd6b5f2dfab638ce0a889feb41bf5cb7e3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/bc38bc37fde7e41eb41f137a017389aa04332f4882f0f1624d7d8a673e5c16c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3a03f987f6af5800efd3f69d70eb65af5c8b875f0e2597fbe5aedf24d00720f +size 23616 diff --git a/data/2025/2504_10xxx/2504.10344/images/d14fe6162b3ac4c03543d5fc61d9d14b856032b8180431b1c829610ad257161d.jpg b/data/2025/2504_10xxx/2504.10344/images/d14fe6162b3ac4c03543d5fc61d9d14b856032b8180431b1c829610ad257161d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..970229fc69cdcf915b7d0c9bb9cb80356722a89c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/d14fe6162b3ac4c03543d5fc61d9d14b856032b8180431b1c829610ad257161d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68b5df5cff2b1871fdcb45d35707822803c7fb148c334f31d5e84a8e933932d4 +size 21396 diff --git a/data/2025/2504_10xxx/2504.10344/images/d94fab77d52195e7058b5d482d8f7f1f1f1533b9c9ca7a8d3dd5363564b5f2ed.jpg b/data/2025/2504_10xxx/2504.10344/images/d94fab77d52195e7058b5d482d8f7f1f1f1533b9c9ca7a8d3dd5363564b5f2ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88fb7e3d3b5a2daa7e6f7ef87798584739098683 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10344/images/d94fab77d52195e7058b5d482d8f7f1f1f1533b9c9ca7a8d3dd5363564b5f2ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b24ba63f9ac71b4ccd07472d256500eb8fea1b9801d090386cb6e24623733aea +size 32259 diff --git a/data/2025/2504_10xxx/2504.10344/images/ebeb2a8c7b24539dd827c8b2160b82d162ceb27058d6262bf860960f2abb7cb7.jpg b/data/2025/2504_10xxx/2504.10344/images/ebeb2a8c7b24539dd827c8b2160b82d162ceb27058d6262bf860960f2abb7cb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d282f29d91e0f92519b8244f90326e97aeb3384b --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/ebeb2a8c7b24539dd827c8b2160b82d162ceb27058d6262bf860960f2abb7cb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcf3b375df8ff53792b7a6dfd01df2d11340e8b2d14fd59a68ec93017e9a0641 +size 32653 diff --git a/data/2025/2504_10xxx/2504.10344/images/ef002626f27bb198e207f58573c778f26f9a135585bdd4fb9d3fb8064870efa7.jpg b/data/2025/2504_10xxx/2504.10344/images/ef002626f27bb198e207f58573c778f26f9a135585bdd4fb9d3fb8064870efa7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e23a20567c8a9a4a3a2ef16d87392c32143d51c6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/ef002626f27bb198e207f58573c778f26f9a135585bdd4fb9d3fb8064870efa7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf76e5a5ec5ad3e8315c71689cd8ec4d7deb82080c8568a1fea10407485c7d9c +size 75313 diff --git a/data/2025/2504_10xxx/2504.10344/images/f019bb22a5fffe4804a7ec41e4db6b17b6e1f070f9aabb5852f94021dc8b83bd.jpg b/data/2025/2504_10xxx/2504.10344/images/f019bb22a5fffe4804a7ec41e4db6b17b6e1f070f9aabb5852f94021dc8b83bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24783040a1ba8d951756693d012851bfc1a759d7 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/f019bb22a5fffe4804a7ec41e4db6b17b6e1f070f9aabb5852f94021dc8b83bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9280ccc03fd209469cfe991977b7d0c0908ed5c283ce0b874c7da93934359b41 +size 94692 diff --git a/data/2025/2504_10xxx/2504.10344/images/f59e452f380f40477af82cd8fc63a94ba7b3a987a6cdc597a6a915a3c081ab47.jpg b/data/2025/2504_10xxx/2504.10344/images/f59e452f380f40477af82cd8fc63a94ba7b3a987a6cdc597a6a915a3c081ab47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99e5be252e6811456eb30ece4431fa0cc3d699e3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/f59e452f380f40477af82cd8fc63a94ba7b3a987a6cdc597a6a915a3c081ab47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5456904b9f17e394cbc26736b36626049a31d4c23642046e38be7ba8396797b +size 36499 diff --git a/data/2025/2504_10xxx/2504.10344/images/f7cd93e06c98bc3efdd9f5760db268469ffcaa66c85e4c9a9bb222007a090ce7.jpg b/data/2025/2504_10xxx/2504.10344/images/f7cd93e06c98bc3efdd9f5760db268469ffcaa66c85e4c9a9bb222007a090ce7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..510e825fa5dd7636de1467aded5e44bc061cfd7d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/f7cd93e06c98bc3efdd9f5760db268469ffcaa66c85e4c9a9bb222007a090ce7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:439325af6023dcbcd0789d0b4b1f1075166fdc41357a66929bfec3c9ab45e7d4 +size 10687 diff --git a/data/2025/2504_10xxx/2504.10344/images/ffc97f5c916131474b63ace04ed89cb0ce6ca0f77448c37ae20c999aec41e707.jpg 
b/data/2025/2504_10xxx/2504.10344/images/ffc97f5c916131474b63ace04ed89cb0ce6ca0f77448c37ae20c999aec41e707.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e3b123cb8073dbb4674b7f52d1b49a1b3ba7ee3 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/images/ffc97f5c916131474b63ace04ed89cb0ce6ca0f77448c37ae20c999aec41e707.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2362e44d988ca4210ccb4f32d5ec1cd31352aed63ba35d4f2cd3fee71873f7b8 +size 8848 diff --git a/data/2025/2504_10xxx/2504.10344/layout.json b/data/2025/2504_10xxx/2504.10344/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2af5680835e13c1760207c712bd27e4a9b643a11 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10344/layout.json @@ -0,0 +1,14405 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 58, + 87, + 536, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 87, + 536, + 124 + ], + "spans": [ + { + "bbox": [ + 58, + 87, + 536, + 124 + ], + "type": "text", + "content": "ALMTokenizer: A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "spans": [ + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": "Dongchao Yang" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Songxiang Liu" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Haohan Guo" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Jiankun Zhao" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Yuanyuan Wang" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Helin Wang" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Zeqian Ju" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Xubo Liu" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Xueyuan Chen" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Xu Tan" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "text", + "content": " Xixin Wu" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 53, + 
157, + 541, + 184 + ], + "type": "text", + "content": " Helen Meng" + }, + { + "bbox": [ + 53, + 157, + 541, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 148, + 205, + 196, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 205, + 196, + 217 + ], + "spans": [ + { + "bbox": [ + 148, + 205, + 196, + 217 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 221, + 272, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 221, + 272, + 580 + ], + "spans": [ + { + "bbox": [ + 72, + 221, + 272, + 580 + ], + "type": "text", + "content": "Recent advancements in audio language models have underscored the pivotal role of audio tokenization, which converts audio signals into discrete tokens, thereby facilitating the application of language model architectures to the audio domain. In this study, we introduce ALMTokenizer, a novel low-bitrate and semantically rich audio codec tokenizer for audio language models. Prior methods, such as Encodec, typically encode individual audio frames into discrete tokens without considering the use of context information across frames. Unlike these methods, we introduce a novel query-based compression strategy to capture holistic information with a set of learnable query tokens by explicitly modeling the context information across frames. This design not only enables the codec model to capture more semantic information but also encodes the audio signal with fewer token sequences. Additionally, to enhance the semantic information in audio codec models, we introduce the following: (1) A masked autoencoder (MAE) loss, (2) Vector quantization based on semantic priors, and (3) An autoregressive (AR) prediction loss. As a result, ALMTokenizer achieves competitive reconstruction performance relative to state-of-the-art approaches while operating at a lower bitrate. Within the same audio language model framework, ALMTokenizer outperforms previous tokenizers in audio understanding and generation tasks." + }, + { + "bbox": [ + 72, + 221, + 272, + 580 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 603, + 133, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 603, + 133, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 603, + 133, + 616 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 623, + 290, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 623, + 290, + 648 + ], + "spans": [ + { + "bbox": [ + 52, + 623, + 290, + 648 + ], + "type": "text", + "content": "The field of generative modeling has witnessed remarkable progress, largely driven by the success of autoregressive" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 654, + 290, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 654, + 290, + 686 + ], + "spans": [ + { + "bbox": [ + 52, + 654, + 290, + 686 + ], + "type": "text", + "content": "*Equal contribution 1The Chinese University of Hong Kong, Hong Kong, China 2Independent Authors. Correspondence to: Dongchao Yang ." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 205, + 543, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 205, + 543, + 350 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 543, + 350 + ], + "type": "text", + "content": "(AR) models in the development of large language models (LLMs) (OpenAI, 2023). Inspired by the success of LLMs in the fields of natural language processing (NLP), recent works have begun to employ AR transformers for audio generation (Borsos et al., 2023a; Agostinelli et al., 2023; Yang et al., 2023c), such as using the AR transformer paradigm to solve text-to-speech task (Wang et al., 2023), or expanding the text LLM into multimodal LLM by integrating the audio modality into the original LLM (Défossez et al., 2024). Audio tokenizer plays an important role in all of these models, which converts audio signals into discrete token sequence for AR audio language modeling." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "spans": [ + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "text", + "content": "In the literature, audio codec models, such as SoundStream (Zeghidour et al., 2021) and Encodec (Défossez et al., 2022), have been widely adopted as audio tokenizers for audio language models. These generative models aim to represent audio data in a quantized discrete latent space, where the codec's decoder is then used to reconstruct the audio signals from the generated discrete token sequences. Recently, there has been significant interest in the audio community regarding audio codec tokenizers, leading to the proposal of several novel models (Kumar et al., 2023; Ji et al., 2024; Défossez et al., 2024; Parker et al., 2024; Zhang et al., 2023). Despite the advancements in audio codec models, an important research question remains unanswered: which type of audio codec is most suitable for audio language modeling? Inspired by previous works (Borsos et al., 2023a; Parker et al., 2024; Ji et al., 2024; Défossez et al., 2024), these studies investigate two key properties of audio codec models: low bitrate and semantic richness. We first conduct a set of evaluation experiments to explore the influence of bitrate and semantic information on audio language modeling. Specifically, we train three audio codec models with varying bitrates, while keeping the number of vector quantization (VQ) layers constant and adjusting the frame rates to " + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "inline_equation", + "content": "50\\mathrm{Hz}" + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "inline_equation", + "content": "25\\mathrm{Hz}" + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "text", + "content": ". We then train the audio language model using different audio tokenizers on the same dataset. 
To assess the impact of semantic information, we also train a " + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 303, + 354, + 544, + 715 + ], + "type": "text", + "content": " semantic tokenizer and incorporate it into the audio language model. Further details can be found in Appendix B. Figure 1 presents the results, which show that: (1) low-bitrate audio codec models significantly en" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 210, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 210, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 210, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10344v1 [cs.SD] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 696, + 90, + 706 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 696, + 90, + 706 + ], + "spans": [ + { + "bbox": [ + 52, + 696, + 90, + 706 + ], + "type": "text", + "content": "Pre-print." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 706, + 220, + 717 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 706, + 220, + 717 + ], + "spans": [ + { + "bbox": [ + 66, + 706, + 220, + 717 + ], + "type": "text", + "content": "1http://dongchaoyang.top/ALMTokensizer/" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 199 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 199 + ], + "type": "text", + "content": "hance training and inference efficiency; and (2) semantic information is more easily modeled by LM-based generative methods, e.g. lower PPL and loss. The experimental findings demonstrate the importance of constructing a low-bitrate and semantic-rich audio codec tokenizer for audio language modeling. Based on these results, we propose a novel audio codec tokenizer that offers the following advantages: (1) Low-bitrate: it compresses the audio data into fewer tokens; (2) Semantic-rich: it incorporates abundant semantic information; (3) AR-driven latent space: it optimizes the latent space for autoregressive (AR) modeling." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 205, + 292, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 292, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 292, + 479 + ], + "type": "text", + "content": "To achieve this objective, we propose the following novel techniques: (1) We introduce a novel query-based compression strategy, which uses a set of learnable query tokens to capture holistic information by explicitly modeling the context information across audio frames with transformer layers. This strategy effectively takes advantage of the strong modeling capabilities of transformers to achieve better compression and semantic modeling. It also enables dynamic control over the compression rate by adjusting the number of query tokens. 
(2) To enhance semantic richness in the codec model, we introduce a Masked Autoencoder (MAE) loss, which encourages the model to capture more global information. (3) Inspired by previous works (Zhu et al., 2024), we propose the integration of semantic priors into the VQ layer. Specifically, we perform k-means clustering on the pre-trained wav2vec2 (Baevski et al., 2020) and BEATs (Chen et al., 2022b) encoder outputs, using the cluster centers to initialize the VQ layer. (4) We observe that AR models struggle to fit the distribution of the residuals in the VQ layers, with token prediction accuracy being notably lower in the second and third VQ layers compared to the first. To address this issue, we introduce an AR prediction loss to optimize the latent space." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 479, + 291, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 479, + 291, + 576 + ], + "spans": [ + { + "bbox": [ + 52, + 479, + 291, + 576 + ], + "type": "text", + "content": "To evaluate the effectiveness of the ALMTokenizer, we first compare its reconstruction and semantic performance with previous state-of-the-art models. Using the same audio language model framework, we then demonstrate that ALMTokenizer achieves superior performance in LM-based audio understanding and generation tasks, including text-to-speech (TTS), speech-to-text (ASR), audio captioning, text-to-sound, text-to-music, and music captioning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 590, + 144, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 590, + 144, + 604 + ], + "spans": [ + { + "bbox": [ + 52, + 590, + 144, + 604 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 611, + 178, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 611, + 178, + 623 + ], + "spans": [ + { + "bbox": [ + 52, + 611, + 178, + 623 + ], + "type": "text", + "content": "2.1. Audio Language Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 630, + 291, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 630, + 291, + 713 + ], + "spans": [ + { + "bbox": [ + 52, + 630, + 291, + 713 + ], + "type": "text", + "content": "Recently, there has been a growing interest in bridging audio and text through multimodal learning approaches. Models such as AudioLM (Borsos et al., 2023a) leverage AR transformers and hierarchical modeling techniques to process audio data directly, learning representations that capture both linguistic and acoustic features. Inspired by AudioLM, VALL-E (Wang et al., 2023) and SPEAR-TTS (Kharitonov" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 315, + 64, + 533, + 277 + ], + "blocks": [ + { + "bbox": [ + 315, + 64, + 533, + 277 + ], + "lines": [ + { + "bbox": [ + 315, + 64, + 533, + 277 + ], + "spans": [ + { + "bbox": [ + 315, + 64, + 533, + 277 + ], + "type": "image", + "image_path": "1f2afdd26c38ad12bec4637ffdc1de7b03af211f664c49636beea44b22135499.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 288, + 542, + 310 + ], + "lines": [ + { + "bbox": [ + 304, + 288, + 542, + 310 + ], + "spans": [ + { + "bbox": [ + 304, + 288, + 542, + 310 + ], + "type": "text", + "content": "Figure 1. The performance comparison when different types of tokenizer is used for audio modeling. PPL refers to perplexity." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 315, + 544, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 315, + 544, + 518 + ], + "spans": [ + { + "bbox": [ + 303, + 315, + 544, + 518 + ], + "type": "text", + "content": "et al., 2023) formulate the text-to-speech task as an audio language modeling problem: generating an audio token sequence with the help of an autoregressive transformer. MusicLM (Agostinelli et al., 2023) and MusicGen (Copet et al., 2023) frame the text-to-music task as an audio language modeling problem. UniSep (Wang et al., 2025) explores using audio LM to solve audio separation tasks with the help of audio tokenizer. Moshi (Défossez et al., 2024), SpiRitLM (Nguyen et al., 2025), and GLM4-Voice (Zeng et al., 2024) explore speech-to-speech conversation. Furthermore, audio tokenizers can also be combined with discrete diffusion models (Yang et al., 2023d;a; Borsos et al., 2023b; Ju et al., 2024). In all of these models, the audio tokenizer plays a crucial role by transforming audio data into a discrete latent sequence, reducing computational demands compared to directly processing the audio signal, and enhancing the effectiveness and efficiency of the generation process." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 531, + 395, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 395, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 395, + 542 + ], + "type": "text", + "content": "2.2. Audio Tokenizer" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 550, + 544, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 550, + 544, + 716 + ], + "spans": [ + { + "bbox": [ + 303, + 550, + 544, + 716 + ], + "type": "text", + "content": "In the literature, both semantic and acoustic tokenizers are widely employed in audio language models. The semantic tokenizer is trained using pre-trained self-supervised learning (SSL) models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a). Applying k-means or vector quantization in these models generates semantic tokens (Zeng et al., 2024; Du et al., 2024; Liu et al., 2024). Previous works (Borsos et al., 2023a) demonstrate that semantic tokens are more easily modeled by language models. However, due to the loss of significant acoustic information in semantic tokens, they rely on an additional decoder to generate high-fidelity waveform, such as a diffusion model (Ho et al., 2020) or flow-matching (Lipman et al., 2022). 
Inevitably, this additional module results in increased infer" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 83, + 75, + 195, + 228 + ], + "blocks": [ + { + "bbox": [ + 83, + 75, + 195, + 228 + ], + "lines": [ + { + "bbox": [ + 83, + 75, + 195, + 228 + ], + "spans": [ + { + "bbox": [ + 83, + 75, + 195, + 228 + ], + "type": "image", + "image_path": "3c618b74f688548add74bec89c6981564b20b5fb43a06e3d3e40ad5bc0570a5f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 245, + 543, + 268 + ], + "lines": [ + { + "bbox": [ + 51, + 245, + 543, + 268 + ], + "spans": [ + { + "bbox": [ + 51, + 245, + 543, + 268 + ], + "type": "text", + "content": "Figure 2. The left part illustrates the framework of the previous audio codec, while the right part provides an overview of the proposed ALMTokensizer. " + }, + { + "bbox": [ + 51, + 245, + 543, + 268 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 51, + 245, + 543, + 268 + ], + "type": "text", + "content": " denotes the window size. The details of ALMTokensizer can be found in Section 3.2." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 201, + 75, + 512, + 227 + ], + "blocks": [ + { + "bbox": [ + 201, + 75, + 512, + 227 + ], + "lines": [ + { + "bbox": [ + 201, + 75, + 512, + 227 + ], + "spans": [ + { + "bbox": [ + 201, + 75, + 512, + 227 + ], + "type": "image", + "image_path": "929236d9a847bdafe944104601209dbf98eadb1d765b6edf85d4aa3a2145a540.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 286, + 229, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 286, + 229, + 297 + ], + "spans": [ + { + "bbox": [ + 52, + 286, + 229, + 297 + ], + "type": "text", + "content": "ence complexity and poorer reconstruction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 298, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 298, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 298, + 291, + 717 + ], + "type": "text", + "content": "Acoustic tokenizer refers to audio codec models, trained for acoustic-level reconstruction tasks. Audio codec (Zeghidour et al., 2021; Defossez et al., 2022; Yang et al., 2023b; Kumar et al., 2023) have demonstrated exceptional performance in reconstructing high-quality audio. In general, these codec models consist of an encoder, a quantizer, and a decoder. Both the encoder and decoder are lightweight, resulting in minimal inference costs. 
Compared to semantic tokens, codec models can support audio, speech, and music domains, and their rich acoustic details mitigate the need for cascading architectures in downstream generative models. Recently, an increasing number of audio codec models have been proposed, focusing on (1) Better reconstruction quality, such as DAC (Kumar et al., 2023), Vocos (Siuzdak, 2023), SQ-Codec (Yang et al., 2024c;b) and APCodec (Ai et al., 2024); (2) Low-bitrate models, such as HiFiCodec (Yang et al., 2023b), wavtokenizer (Ji et al., 2024), StableCodec (Parker et al., 2024), and TS3-Coded (Wu et al., 2024); (3) Task-driven codec, designed for text-to-speech tasks, such as FACodec (Ju et al., 2024), SpeechTokenizer (Zhang et al., 2023), Single-Coded (Li et al., 2024), audio retrieval-based Tokenizers (Banerjee & Arora, 2022; van Niekerk et al., 2024). In this study, we focus on developing a low-bitrate, semantically rich audio codec tokenizer. The most closely related work to ours is MimiCodec (Defossez et al., 2024), which provides high-quality semantic information while achieving a low bitrate (1.1 kbps). However, MimiCodec relies on knowledge distillation from WavLM (Chen et al., 2022a) to the first VQ layer, whereas the remaining VQ layers do not incorporate semantic information. Furthermore, it is specifically designed for speech tasks and has not been validated for non-speech tasks, such as sound and music generation. In contrast to MimiCodec, our ALMTokens encode more semantic information across all VQ layers, achieves a lower bitrate, and is designed for both speech and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 286, + 366, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 286, + 366, + 298 + ], + "spans": [ + { + "bbox": [ + 304, + 286, + 366, + 298 + ], + "type": "text", + "content": "general sound." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 313, + 411, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 313, + 411, + 327 + ], + "spans": [ + { + "bbox": [ + 304, + 313, + 411, + 327 + ], + "type": "text", + "content": "3. Proposed Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 334, + 544, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 334, + 544, + 394 + ], + "spans": [ + { + "bbox": [ + 304, + 334, + 544, + 394 + ], + "type": "text", + "content": "This section introduces the technical details of the proposed ALMTokensizer. Section 3.1 presents the framework of previous audio codec models. Section 3.2 presents the details of proposed audio codec framework. In Sections 3.3 and 3.4, we present the training loss and training strategies." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 407, + 377, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 377, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 377, + 419 + ], + "type": "text", + "content": "3.1. Preliminary" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 426, + 543, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 426, + 543, + 605 + ], + "spans": [ + { + "bbox": [ + 303, + 426, + 543, + 605 + ], + "type": "text", + "content": "Previous audio codec (Défossez et al., 2022; Zeghidour et al., 2021) typically adopt an encoder-quantizer-decoder framework, as shown in the left part of Figure 2. The audio is encoded into several audio frames by the encoder. 
Then, residual vector quantization (RVQ) (Zeghidour et al., 2021) is used to quantize these audio frames. Lastly, the decoder is used to recover the waveform from the quantized audio frames. It can be observed that previous works treat each audio frame equally and rely on these quantized frames to recover the audio. However, such a strategy (1) ignores the fact that different audio frames encode different levels of information, which results in some audio frames being difficult to recover in low-bitrate settings (e.g., encoding the audio frames at " + }, + { + "bbox": [ + 303, + 426, + 543, + 605 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 303, + 426, + 543, + 605 + ], + "type": "text", + "content": "); (2) fails to utilize the context information between different frames." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 618, + 468, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 468, + 631 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 468, + 631 + ], + "type": "text", + "content": "3.2. Query-based Audio Compression" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 636, + 544, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 636, + 544, + 708 + ], + "spans": [ + { + "bbox": [ + 303, + 636, + 544, + 708 + ], + "type": "text", + "content": "To construct a low-bitrate, semantically rich audio codec model, we propose a query-based compression strategy. Our approach is inspired by the success of MAE (He et al., 2022), which applies a masking operation to the original image with a high mask rate (75%). With the help of a transformer encoder and decoder, it is possible to recover the masked" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 291, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 291, + 259 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 291, + 259 + ], + "type": "text", + "content": "image content by utilizing the context information between different patches. Thus, we propose using a group of query tokens " + }, + { + "bbox": [ + 52, + 68, + 291, + 259 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 52, + 68, + 291, + 259 + ], + "type": "text", + "content": " to capture holistic audio context information from the audio frames with the assistance of a transformer encoder. Since these query tokens include rich context information, it is possible to reconstruct the audio based on them. Then, a transformer decoder and mask tokens are employed to reconstruct the audio from the quantized query tokens. This strategy leverages the powerful modeling capabilities of transformers to achieve better compression and semantic modeling. 
Similar query-based strategies has been widely explored in previous works, such as BLIP2 (Li et al., 2023), SALMONN (Tang et al., 2024) and TiTok(Yu et al., 2024). The right part of Figure 2 illustrates the overall framework of ALMTokensizer. In the following sections, we detail each component and the associated training loss." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "spans": [ + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "content": "Patchify and UnPatchify We explore two types of Patchify modules: (1) Following Encodec (Défossez et al., 2022), a convolution-based module, which encodes the audio data " + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "inline_equation", + "content": "e \\in \\mathcal{R}^{T \\times d}" + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "content": " denote the number of frames and the vector dimension, and (2) Following StableCodec (Parker et al., 2024), which directly uses a linear layer to encode the audio data into " + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "inline_equation", + "content": "e \\in \\mathcal{R}^{T \\times d}" + }, + { + "bbox": [ + 52, + 264, + 292, + 469 + ], + "type": "text", + "content": " and adds several transformer layers. Similarly, the UnPatchify mirrors the architecture of Patchify. If we use the Encodec-style Patchify module, the UnPatchify module substitutes stride convolutions with transposed convolutions and reverses the stride order. If we use the StableCodec-style Patchify module, the UnPatchify module includes a transformer block and a reshape operation. In our preliminary experiments, we find that the Encodec-style Patchify and UnPatchify modules bring better reconstruction performance. We adopt the Encodec-style Patchify module as our default setting." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "spans": [ + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "text", + "content": "Token Interleaving The token interleaving module aims to combine two token sequences into a single sequence. In the encoder part, we combine the audio frames " + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "inline_equation", + "content": "e \\in \\mathcal{R}^{T \\times d}" + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "text", + "content": " and the query token [CLS]. 
Assuming a window size of " + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "text", + "content": ", the query token will be inserted into the audio frame sequence at every " + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "text", + "content": "-intervals. In the decoder part, the token interleaving module is used to combine the quantized query tokens and learnable mask tokens. We insert " + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 52, + 474, + 291, + 605 + ], + "type": "text", + "content": " mask tokens before each query token. During the training stage, we dynamically choose the window size for each training iteration." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 611, + 290, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 611, + 290, + 659 + ], + "spans": [ + { + "bbox": [ + 52, + 611, + 290, + 659 + ], + "type": "text", + "content": "Token Retrieval The token retrieval module aims to retrieve the relevant tokens from a sequence. In the encoder part, we use it to retrieve the learnable query tokens. In the decoder part, we use it to retrieve the learnable mask tokens." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 665, + 290, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 665, + 290, + 677 + ], + "spans": [ + { + "bbox": [ + 52, + 665, + 290, + 677 + ], + "type": "text", + "content": "Query-based Transformer Encoder As the previous part" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "content": "discussed, we introduce a learnable query token " + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "inline_equation", + "content": "[\\mathrm{cls}] \\in \\mathcal{R}^{1 \\times d}" + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "content": " to capture holistic information from the audio frames " + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "content": ". As Figure 2 shows, we first combine the audio frames and query token using a token interleaving module with a window size " + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "content": ". Then, a transformer module is applied to model the whole sequence " + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "inline_equation", + "content": "e_a" + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "content": ". After that, we employ a token retrieval module to extract the query tokens " + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "inline_equation", + "content": "h \\in \\mathcal{R}^{[T / w] \\times d}" + }, + { + "bbox": [ + 304, + 67, + 542, + 151 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 334, + 156, + 542, + 185 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 156, + 542, + 185 + ], + "spans": [ + { + "bbox": [ + 334, + 156, + 542, + 185 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {e} = P (\\boldsymbol {x}), \\boldsymbol {e} _ {\\boldsymbol {a}} = I n t e r l e a v i n g (\\boldsymbol {\\mathbf {e}}, \\boldsymbol {c l s}, w), \\tag {1} \\\\ \\boldsymbol {e} _ {\\boldsymbol {a}} = E n (\\boldsymbol {e} _ {\\boldsymbol {a}}), \\boldsymbol {h} = R e c t r i e v a l (\\boldsymbol {e} _ {\\boldsymbol {a}}, w) \\\\ \\end{array}", + "image_path": "36bf7ac0e5ab73bda6d7ca539bf65a78caa3f38942f1291ac6626ec8bb140cad.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "spans": [ + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "type": "inline_equation", + "content": "P(\\cdot)" + }, + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "type": "text", + "content": " denotes the Patchify module. " + }, + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "type": "inline_equation", + "content": "En(\\cdot)" + }, + { + "bbox": [ + 304, + 190, + 542, + 214 + ], + "type": "text", + "content": " denotes the transformer encoder." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "text", + "content": "Residual Vector Quantization To build a low-bitrate audio codec, we empirically set the number of RVQ layers to 3, since we found that 3 RVQ layers suffice to build an effective audio codec model: " + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "inline_equation", + "content": "\\hat{h} = Q(h)" + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "text", + "content": ". Inspired by previous works (Zhu et al., 2024; Yang et al., 2024a), we first obtain the k-means clusters of Wav2vec2 (Baevski et al., 2020) to represent the speech semantic prior, and the k-means clusters of the BEATs (Chen et al., 2022b) to represent the general sound semantic prior. Assuming the codebook size is " + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "text", + "content": ", we set " + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "inline_equation", + "content": "C / 2" + }, + { + "bbox": [ + 304, + 220, + 542, + 388 + ], + "type": "text", + "content": " to represent speech, with the remaining portion representing general sound. We then use these semantic priors to initialize the codebook of the VQ layer and fix it. Next, we apply a linear layer to map the input features into the VQ layer." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "spans": [ + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "type": "text", + "content": "Query-based Transformer Decoder To recover the audio information, we construct a reverse process using the encoder part. 
We first use the token interleaving module to combine the mask token " + }, + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "type": "inline_equation", + "content": "m \\in \\mathcal{R}^{1 \\times d}" + }, + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{h}}" + }, + { + "bbox": [ + 303, + 394, + 542, + 478 + ], + "type": "text", + "content": ". The new sequence is then modeled by a transformer module. We expect that these mask tokens can be used to recover the audio information with the help of the Unpatchify module." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 330, + 483, + 542, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 483, + 542, + 512 + ], + "spans": [ + { + "bbox": [ + 330, + 483, + 542, + 512 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\boldsymbol {q} _ {\\boldsymbol {a}} = \\text {I n t e r l e a v i n g} (\\hat {\\boldsymbol {h}}, \\boldsymbol {m}, w), \\boldsymbol {q} _ {\\boldsymbol {a}} = D e (\\boldsymbol {q} _ {\\boldsymbol {a}}) \\tag {2} \\\\ \\boldsymbol {e} _ {\\boldsymbol {o}} = \\operatorname {R e c t r i e v a l} (\\boldsymbol {q} _ {\\boldsymbol {a}}, w), \\hat {\\boldsymbol {x}} = U n P (\\boldsymbol {e} _ {\\boldsymbol {o}}), \\\\ \\end{array}", + "image_path": "f7cd93e06c98bc3efdd9f5760db268469ffcaa66c85e4c9a9bb222007a090ce7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "spans": [ + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "type": "inline_equation", + "content": "Unp(\\cdot)" + }, + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "type": "text", + "content": " denotes the Unpatchify module. " + }, + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "type": "inline_equation", + "content": "De(\\cdot)" + }, + { + "bbox": [ + 304, + 517, + 542, + 541 + ], + "type": "text", + "content": " denotes the transformer decoder." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 555, + 384, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 384, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 384, + 567 + ], + "type": "text", + "content": "3.3. Training Loss" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 574, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 574, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 574, + 542, + 717 + ], + "type": "text", + "content": "Similar to previous audio CODECs, our approach is based on a GAN objective, where we optimize both the generator (which consists of the Patchify module, transformer encoder, quantizer, transformer decoder, and UnPatchify module) and the discriminators. For the generator, the training loss comprises four components: (1) reconstruction loss term; (2) adversarial loss term; (3) Masked AutoEncoder (MAE) loss; and (4) AR prediction loss. The reconstruction and adversarial losses typically follow previous works (Défossez et al., 2022; Zeghidour et al., 2021). In the following, we describe the MAE loss and AR prediction loss. More details of training loss refer to Appendix G." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 684, + 290, + 706 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 684, + 290, + 706 + ], + "spans": [ + { + "bbox": [ + 52, + 684, + 290, + 706 + ], + "type": "text", + "content": "2Query tokens are learnable embedding vectors that are updated throughout the training process." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "text", + "content": "MAE Loss As we discussed in Section 1, a semantic-rich audio codec tokenizer is better suited for audio language modeling. Inspired by the success of MAE (He et al., 2022), we propose to incorporate an MAE loss during the training of the audio codec. Specifically, for the frame sequence " + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "text", + "content": ", we randomly choose several audio frame features and set these frames to zero, " + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "inline_equation", + "content": "e_m = \\mathrm{Mask}(e)" + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "text", + "content": ". We pass the masked features " + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "inline_equation", + "content": "e_m" + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "text", + "content": " into the encoder transformer. Then, the encoded features are passed into an MAE-decoder transformer block to predict " + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 52, + 67, + 291, + 235 + ], + "type": "text", + "content": ". In our experiments, we adopt a dynamic mask rate (from 0.2 to 0.3), we found that using a large mask rate will significantly influence the reconstruction performance. Following MAE (He et al., 2022), we apply the MSE loss to the masked audio frames." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 240, + 292, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 240, + 292, + 445 + ], + "spans": [ + { + "bbox": [ + 52, + 240, + 292, + 445 + ], + "type": "text", + "content": "AR Loss As shown in figure 3, we find that the first layer of RVQ-based audio codec models is easier to fit for the audio language model than the other layers (e.g., layers 2 and 3). One possible reason is that the first layer encodes more semantically related information. 
For speech data, most of the content information can be recovered by the first VQ layer, while the residual layers primarily encode acoustic-level information, which influences speech quality. To make the tokens in the residual layer easier to fit, we introduce an autoregressive (AR) prediction prior (Wang et al., 2024a) in the RVQ latent space. Specifically, we introduce a lightweight continuous autoregressive (AR) transformer3, which is used to conduct next-token prediction in the RVQ layer. For example, it is tasked with predicting the quantized feature of the third VQ layer based on the features of the first and second VQ layers. We use mean squared error (MSE) loss for optimization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 457, + 194, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 457, + 194, + 470 + ], + "spans": [ + { + "bbox": [ + 52, + 457, + 194, + 470 + ], + "type": "text", + "content": "3.4. Two-stage Training Strategy" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 475, + 291, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 475, + 291, + 656 + ], + "spans": [ + { + "bbox": [ + 52, + 475, + 291, + 656 + ], + "type": "text", + "content": "Although training the ALMTokenizer using the typical Encoder (Défossez et al., 2022) setting is feasible, we introduce a two-stage training paradigm to improve both reconstruction performance and semantic information. Our motivation stems from the fact that audio codec quantization focuses on modeling local relationships, whereas semantic information focuses on modeling global relationships. These two goals are in conflict. To resolve this conflict, we present a two-stage training strategy. In the first stage, we do not incorporate the quantization part; instead, we train directly an AutoEncoder with Patchify and UnPatchify modules. To encode more semantic information in the Patchify module, we introduce MAE loss during this stage, by adding transformer-based MAE-encoder and decoder. The encoder processes the masked frame sequence, and the decoder pre" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 67, + 542, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 140 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 140 + ], + "type": "text", + "content": "dicts the masked part. After training, the transformer encoder and decoder are discarded. In the second stage, we first initialize the ALMTokensizer's Patchify and UnPatchify modules with the checkpoint from the first stage, and freeze the parameters of the Patchify module. Then, we train the model using the training loss described in Section 3.3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 155, + 385, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 155, + 385, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 155, + 385, + 167 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 175, + 448, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 175, + 448, + 186 + ], + "spans": [ + { + "bbox": [ + 304, + 175, + 448, + 186 + ], + "type": "text", + "content": "4.1. 
Dataset and Training Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 194, + 543, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 194, + 543, + 361 + ], + "spans": [ + { + "bbox": [ + 303, + 194, + 543, + 361 + ], + "type": "text", + "content": "Data preparation for the audio codec ALMTokensizer is trained on approximately 4,500 hours of data. In the speech domain, we utilize LibriTTS training set (Zen et al., 2019) and a subset of Multilingual LibriSpeech (MLS) (Pratap et al., 2020), with 2,000 hours randomly selected. In the sound domain, we utilize a subset of AudioSet, with 1,000 hours randomly selected; in the music domain, we employ a subset of the Million Song Dataset (Bertin-Mahieux et al., 2011), also with 1,000 hours randomly selected. We evaluate the codec's speech reconstruction performance using a subset of the VCTK dataset (Veaux et al., 2017), and assess both audio and music reconstruction performance using the AudioCaps (Kim et al., 2019) validation set and the MusicCaps dataset (Agostinelli et al., 2023), respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 367, + 544, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 367, + 544, + 475 + ], + "spans": [ + { + "bbox": [ + 303, + 367, + 544, + 475 + ], + "type": "text", + "content": "Data for Audio Language Models To assess the effectiveness of the proposed audio tokenizer, we construct an audio language model framework to perform six audio-related tasks. The details are provided in Appendix D.3 and D.4. For speech data, we select 2,000 hours of speech-text pairs from LibriHeavy (Kang et al., 2024). For sound data, we utilize the AudioCaps training set and BBC Sound Effects. For music data, we use a subset of the Million Song dataset and the caption data from LP-MusicCaps (Doh et al., 2023)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 480, + 544, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 480, + 544, + 696 + ], + "spans": [ + { + "bbox": [ + 303, + 480, + 544, + 696 + ], + "type": "text", + "content": "Implementation Details ALMTokenizer first performs patchification on the audio data, we set the patch size to 320 in all of experiments, which encodes 1 second of " + }, + { + "bbox": [ + 303, + 480, + 544, + 696 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 303, + 480, + 544, + 696 + ], + "type": "text", + "content": " audio into 75 frames. For the Encoder-style Patchify module, we adopt the settings from Encodec (Défossez et al., 2022) encoder. To enable streaming for the codec model, a causal convolution layer is employed. For the encoder-transformer and decoder-transformer components, we use 24 self-attention layers, with latent dimensions of 256 and 512, respectively. Following StableCodec (Parker et al., 2024), the self-attention mechanism uses a causal sliding attention window of 64 steps to restrict the receptive field and promote the generalization of the architecture to sequences of arbitrary length. Rotary Positional Embeddings (RoPE) are used. Refer to Appendix G for the details of ALMTokenizer model training. For the audio language model, we follow the framework of Moshi (Défossez et al., 2024). For further details, refer to Appendix A." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 662, + 291, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 662, + 291, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 662, + 291, + 715 + ], + "type": "text", + "content": "3The term continuous autoregressive (AR) transformer is used to distinguish our approach from traditional discrete AR models, which operate on discrete token sequences and are optimized using cross-entropy loss. In our study, to facilitate gradient backpropagation, we apply the AR transformer directly to continuous features." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 54, + 116, + 557, + 244 + ], + "blocks": [ + { + "bbox": [ + 52, + 74, + 542, + 115 + ], + "lines": [ + { + "bbox": [ + 52, + 74, + 542, + 115 + ], + "spans": [ + { + "bbox": [ + 52, + 74, + 542, + 115 + ], + "type": "text", + "content": "Table 1. The speech reconstruction and semantic performance comparison between the ALMTokensizer and previous tokenizers. FPS denotes that the frame number in one second. TPS denotes that the token number in one second. CS denotes the codebook size, BR denotes the bit-rate. ST denotes speechtokenizer. Bold for the best result and underline for the second-best result. Evaluation on VCTK dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 54, + 116, + 557, + 244 + ], + "lines": [ + { + "bbox": [ + 54, + 116, + 557, + 244 + ], + "spans": [ + { + "bbox": [ + 54, + 116, + 557, + 244 + ], + "type": "table", + "html": "
Models | FPS/TPS | CS/BR | Reconstruction | Semantic
UTMOS (↑) | DNS-MOS (↑) | VISQOL (↑) | STOI (↑) | PESQ (↑) | ASR (↓) | ER (↑)
Hubert (Hsu et al., 2021) | - | - | - | - | - | - | - | 6.5 | 31.0
WavLM (Chen et al., 2022a) | - | - | - | - | - | - | - | 6.2 | 29.0
Encodec (Défossez et al., 2022) | 50/150 | 1024/1.5kbps | 2.58 | 3.27 | 3.64 | 0.81 | 2.0 | 35.3 | 26.5
DAC (Kumar et al., 2023) | 50/150 | 1024/1.5kbps | 3.13 | 3.41 | 3.67 | 0.81 | 2.1 | 44.1 | 17.6
Wavtokenizer (Ji et al., 2024) | 40/40 | 4096/0.48kbps | 3.67 | 3.50 | 3.72 | 0.79 | 1.9 | 44.6 | 19.8
StableCodec (Parker et al., 2024) | 25/25 | 46656/0.4kbps | 4.22 | 3.64 | 3.40 | 0.76 | 1.8 | 98.3 | 15.8
ST (Zhang et al., 2023) | 50/150 | 1024/1.5kbps | 3.41 | 3.36 | 3.68 | 0.79 | 1.7 | 19.8 | 27.0
Mimi (Défossez et al., 2024) | 12.5/37.5 | 2048/0.41kbps | 3.01 | 3.14 | 3.28 | 0.75 | 1.5 | 25.1 | 28.0
Mimi (Défossez et al., 2024) | 12.5/100 | 2048/1.1kbps | 3.65 | 3.38 | 3.82 | 0.82 | 2.1 | 23.8 | 28.3
ALMTokensizer (Ours) | 12.5/37.5 | 2048/0.41kbps | 3.76 | 3.64 | 3.78 | 0.81 | 2.0 | 18.3 | 29.0
", + "image_path": "082256a1f928b9b771e0604046246e9c0a0de124d1e9ec2d048c5dcfe42c390e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 263, + 155, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 263, + 155, + 274 + ], + "spans": [ + { + "bbox": [ + 52, + 263, + 155, + 274 + ], + "type": "text", + "content": "4.2. Evaluation Metrics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 282, + 291, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 282, + 291, + 331 + ], + "spans": [ + { + "bbox": [ + 52, + 282, + 291, + 331 + ], + "type": "text", + "content": "We evaluate the performance of previous SOTA audio tokenizers, and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 336, + 290, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 336, + 290, + 420 + ], + "spans": [ + { + "bbox": [ + 52, + 336, + 290, + 420 + ], + "type": "text", + "content": "Audio Reconstruction For speech reconstruction, we use DNS-MOS, UT-MOS, PESQ, STOI (Short-time Objective Intelligibility), and VISQOL. For sound and music data evaluation, VISQOL (audio version), STFT loss, and Mel loss are used. Furthermore, following (Kumar et al., 2023), the MUSHRA subjective test is conducted for speech, sound, and music. Refer to Appendix D for more details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 426, + 290, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 426, + 290, + 582 + ], + "spans": [ + { + "bbox": [ + 52, + 426, + 290, + 582 + ], + "type": "text", + "content": "Audio Semantic Information Previous SSL models, such as Hubert (Hsu et al., 2021), have shown that semantic-rich representation can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Thus, we can validate the performance of features of the audio tokenizer for downstream recognition tasks. For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset, and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For sound data, we conduct sound classification tasks on the ESC-50 dataset (Piczak, 2015). For music data, we conduct music classification tasks on the Medley-solos-DB dataset (Lostanlen & Cella, 2016)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 587, + 290, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 587, + 290, + 706 + ], + "spans": [ + { + "bbox": [ + 52, + 587, + 290, + 706 + ], + "type": "text", + "content": "Audio Understanding To further validate whether the audio tokenizer is suitable for building an audio language model, we propose to conduct an understanding task using discrete tokens. We conduct three tasks: ASR, audio caption, and music caption. For the audio data, we use the audio tokenizer to transform it into discrete tokens, and for text data, we use the BPE tokenizer of LLAMA 3.2. For audio and music caption, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 300, + 541, + 375 + ], + "blocks": [ + { + "bbox": [ + 304, + 251, + 542, + 296 + ], + "lines": [ + { + "bbox": [ + 304, + 251, + 542, + 296 + ], + "spans": [ + { + "bbox": [ + 304, + 251, + 542, + 296 + ], + "type": "text", + "content": "Table 2. The sound reconstruction performance comparison between the proposed ALMTokensizer and previous audio tokenizer models. SC denotes the sound classification task. Evaluation on AudioCaps validation set." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 300, + 541, + 375 + ], + "lines": [ + { + "bbox": [ + 307, + 300, + 541, + 375 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 541, + 375 + ], + "type": "table", + "html": "
Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | SC (↑)
BEATs | - | - | - | 24%
Wav2vec2 | - | - | - | 53%
Encodec | 3.05 | 16.3 | 1.23 | 15%
DAC | 2.98 | 17.6 | 1.24 | 20%
Wavtokenizer | 2.18 | 32.7 | 2.50 | 12%
Ours | 2.99 | 15.0 | 1.24 | 44%
", + "image_path": "bc38bc37fde7e41eb41f137a017389aa04332f4882f0f1624d7d8a673e5c16c8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 444, + 542, + 519 + ], + "blocks": [ + { + "bbox": [ + 304, + 397, + 542, + 440 + ], + "lines": [ + { + "bbox": [ + 304, + 397, + 542, + 440 + ], + "spans": [ + { + "bbox": [ + 304, + 397, + 542, + 440 + ], + "type": "text", + "content": "Table 3. The music reconstruction and semantic performance comparison between the ALMTokensizer and previous audio tokenizers. MC denotes the music classification task. Evaluation on Musiccaps dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 444, + 542, + 519 + ], + "lines": [ + { + "bbox": [ + 307, + 444, + 542, + 519 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 542, + 519 + ], + "type": "table", + "html": "
Models | ViSQOL (↑) | Mel loss (↓) | STFT loss (↓) | MC (↑)
BEATs | - | - | - | 54%
Wav2vec2 | - | - | - | 65%
Encodec | 4.04 | 34.8 | 1.26 | 45%
DAC | 4.06 | 35.9 | 1.28 | 48%
Wavtokenizer | 3.85 | 48.2 | 1.47 | 54%
Ours | 3.96 | 34.4 | 1.32 | 59%
", + "image_path": "b85bef3806853a9e62f3afbd2d853354b00e87704dab5446ce7e4c2bfb7a5fe5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 542, + 542, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 542, + 542, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 542, + 542, + 578 + ], + "type": "text", + "content": "Audio Generation We also conduct audio generation tasks, including text-to-speech, text-to-sound, and text-to-music. Refer to Appendix D for more details." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 591, + 527, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 591, + 527, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 591, + 527, + 602 + ], + "type": "text", + "content": "4.3. The Reconstruction and Semantic Performance" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 609, + 542, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 609, + 542, + 716 + ], + "spans": [ + { + "bbox": [ + 303, + 609, + 542, + 716 + ], + "type": "text", + "content": "We first compare the reconstruction and semantic performance of ALMTokensizer with previous audio tokenizers. Table 1 presents the speech reconstruction and semantic results. We observe the following: (1) In terms of reconstruction, ALMTokensizer achieves impressive results in the low-bitrate setting. For example, compared with previous SOTA models, MimiCodec and Wavtokenizer, ALMTokensizer achieves better reconstruction performance at a lower bitrate. We also note that StableCodec performs well on UT-" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 54, + 118, + 293, + 201 + ], + "blocks": [ + { + "bbox": [ + 52, + 74, + 291, + 116 + ], + "lines": [ + { + "bbox": [ + 52, + 74, + 291, + 116 + ], + "spans": [ + { + "bbox": [ + 52, + 74, + 291, + 116 + ], + "type": "text", + "content": "Table 4. The LM-based TTS and ASR results. The first three metrics are used for TTS, while the last one is used for ASR. GLM4-Voice (Zeng et al., 2024) is a single layer semantic tokenizer. Evaluation on LibriSpeech test clean set." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 54, + 118, + 293, + 201 + ], + "lines": [ + { + "bbox": [ + 54, + 118, + 293, + 201 + ], + "spans": [ + { + "bbox": [ + 54, + 118, + 293, + 201 + ], + "type": "table", + "html": "
Models | WER (↓) | DNSMOS (↑) | UT-MOS (↑) | ASR (↓)
GLM4-voice | 9.9 | 3.96 | 3.79 | 16.3 ± 1.5
DAC | 24.5 | 3.14 | 2.06 | 58.4 ± 1.2
Encodec | 22.9 | 3.48 | 2.14 | 77.2 ± 2.3
StableCodec | 22.7 | 3.63 | 3.70 | 28.0 ± 1.9
Wavtokenizer | 18.5 | 3.72 | 3.58 | 45.6 ± 2.7
MimiCodec | 16.0 | 3.67 | 2.93 | 23.1 ± 1.5
Ours | 11.7 | 3.75 | 3.88 | 19.6 ± 1.8
", + "image_path": "13fe390d7e322759f0fba09a33c1158fc9542e63e8aee9389f8bf854e645d46b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 226, + 291, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 226, + 291, + 597 + ], + "spans": [ + { + "bbox": [ + 52, + 226, + 291, + 597 + ], + "type": "text", + "content": "MOS. The main reason is that StableCodec has denoising capabilities, while the original audio includes some noise. This explains why StableCodec achieves good results on UTMOS but performs poorly on PESQ and STOI. (2) In terms of semantic information, ALMTokensizer demonstrates superior performance, e.g., ALMTokensizer outperforms previous SOTA models, such as Wavtokenizer and StableCodec " + }, + { + "bbox": [ + 52, + 226, + 291, + 597 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 52, + 226, + 291, + 597 + ], + "type": "text", + "content": ". Notably, in the emotion classification task, ALMTokensizer achieves performance comparable to previous SSL models, such as Hubert and WavLM. However, we also note that ALMTokensizer still lags behind these SSL models in ASR performance. We speculate that the inclusion of acoustic information may detract from ASR performance, despite ALMTokensizer containing rich semantic information. Table 2 and 3 show the sound and music experimental results. We can see that ALMTokensizer demonstrates strong reconstruction performance under the low-bitrate setting. Compared to WavTokenizer, the reconstruction performance shows significant improvement. Furthermore, we also note that sound and music are inherently more complex than speech, and encoding them at very low-bitrate remains a challenge. In terms of semantic information, ALMTokensizer significantly surpasses previous works, such as WavTokenizer and Encodec. In comparison with SSL models, BEATs (Chen et al., 2022b) and Wav2vec2-audioset version, ALMTokensizer shows comparable performance. We also perform the MUSHRA subjective test for the reconstruction performance. As shown in Table 7, we find that ALMTokensizer effectively maintains strong subjective reconstruction performance on speech, music, and audio, even with a very low-bitrate setting." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 610, + 268, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 610, + 268, + 621 + ], + "spans": [ + { + "bbox": [ + 52, + 610, + 268, + 621 + ], + "type": "text", + "content": "4.4. Audio Understanding and Generation Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 628, + 291, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 628, + 291, + 689 + ], + "spans": [ + { + "bbox": [ + 52, + 628, + 291, + 689 + ], + "type": "text", + "content": "Speech Understanding and Generation Tasks Table 4 shows the LM-based TTS and ASR results. For the TTS task, we mainly focus on robustness and speech quality. 
In terms of robustness, we can see that the GLM4-voice tokenizer (Zeng et al., 2024), MimiCodec, and the pro" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 324, + 67, + 521, + 211 + ], + "blocks": [ + { + "bbox": [ + 324, + 67, + 521, + 211 + ], + "lines": [ + { + "bbox": [ + 324, + 67, + 521, + 211 + ], + "spans": [ + { + "bbox": [ + 324, + 67, + 521, + 211 + ], + "type": "image", + "image_path": "d14fe6162b3ac4c03543d5fc61d9d14b856032b8180431b1c829610ad257161d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 224, + 538, + 236 + ], + "lines": [ + { + "bbox": [ + 304, + 224, + 538, + 236 + ], + "spans": [ + { + "bbox": [ + 304, + 224, + 538, + 236 + ], + "type": "text", + "content": "Figure 3. The performance comparison with or without AR loss." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 261, + 543, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 261, + 543, + 475 + ], + "spans": [ + { + "bbox": [ + 303, + 261, + 543, + 475 + ], + "type": "text", + "content": "posed ALMTokensizer bring better performance than others, highlighting the importance of semantic information for LM-based speech generation. Compared to previous audio codec tokenizers, ALMTokensizer brings significant improvement. In terms of generated speech quality, ALMTokensizer also shows great advantages, further demonstrating that the proposed tokenizer is more suitable for audio language modeling. Similarly, when we conduct the ASR task using discrete tokens as input, semantic information is also important. Traditional audio codec models perform poorly in this setting, such as DAC, Encodec, and WavTokenizer. StableCodec was fine-tuned by using a CTC head to predict the force-aligned phoneme tags from pre-bottleneck latents. MimiCodec distills the semantic information from WavLM. Thus, they have better performance than previous codec models. In ALMTokensizer, we propose a novel codec framework and training loss to better encode semantic information in the codec model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 482, + 544, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 482, + 544, + 601 + ], + "spans": [ + { + "bbox": [ + 303, + 482, + 544, + 601 + ], + "type": "text", + "content": "Sound/music Understanding and Generation Results We conduct text-to-sound, text-to-music, audio caption and music caption tasks within the same audio language model framework. The experimental results shown in Table 5 indicate that ALMTokensizer shows better performance in both audio caption and audio generation tasks, further demonstrating its advantages. We put more audio tokenizer reconstruction performance experiments on Appendix F, including evaluation on LibriTTS test set, length generalization, and compared to diffusion-based audio codec models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 615, + 389, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 615, + 389, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 615, + 389, + 627 + ], + "type": "text", + "content": "4.5. 
Ablation Study" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 633, + 543, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 633, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 633, + 543, + 717 + ], + "type": "text", + "content": "In order to gain a more comprehensive understanding of ALMTokensizer, we systematically compared each key component using a controlled experimental setup, employing identical architectures and hyperparameters across all trials. The Effectiveness of Query-based Audio Compression In this study, we propose a query-based audio compression strategy for compressing audio data in a very low-bitrate" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 696, + 291, + 717 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 696, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 696, + 291, + 717 + ], + "type": "text", + "content": "4StableCodec's feature dimension is 6, it is hard to apply it for down-streaming task by simple fine-tuning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 63, + 95, + 532, + 217 + ], + "blocks": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "lines": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "type": "text", + "content": "Table 5. The LM-based sound, music understanding and generation. B1, B2, B3, RG, ME, CD, SP, and SD denote BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER, respectively. Evaluation on Audiocaps and Musiccaps datasets." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 95, + 532, + 217 + ], + "lines": [ + { + "bbox": [ + 63, + 95, + 532, + 217 + ], + "spans": [ + { + "bbox": [ + 63, + 95, + 532, + 217 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Models</td><td colspan="8">Understanding</td><td colspan="3">Generation</td></tr>
<tr><td>B1 (↑)</td><td>B2 (↑)</td><td>B3 (↑)</td><td>ME (↑)</td><td>RG (↑)</td><td>CD (↑)</td><td>SP (↑)</td><td>SD (↑)</td><td>FD (↓)</td><td>FAD (↓)</td><td>KL (↓)</td></tr>
<tr><td colspan="12">Sound Task</td></tr>
<tr><td>Encodec</td><td>0.25</td><td>0.15</td><td>0.08</td><td>0.11</td><td>0.24</td><td>0.57</td><td>0.14</td><td>0.35</td><td>10.03</td><td>8.22</td><td>1.73</td></tr>
<tr><td>DAC</td><td>0.26</td><td>0.15</td><td>0.08</td><td>0.11</td><td>0.26</td><td>0.51</td><td>0.13</td><td>0.32</td><td>14.14</td><td>11.7</td><td>1.55</td></tr>
<tr><td>Wavtokenizer</td><td>0.24</td><td>0.14</td><td>0.08</td><td>0.10</td><td>0.22</td><td>0.38</td><td>0.11</td><td>0.25</td><td>6.76</td><td>4.55</td><td>1.28</td></tr>
<tr><td>ALMTokensizer (Ours)</td><td>0.28</td><td>0.17</td><td>0.11</td><td>0.12</td><td>0.24</td><td>0.60</td><td>0.15</td><td>0.37</td><td>4.11</td><td>6.16</td><td>0.55</td></tr>
<tr><td colspan="12">Music Task</td></tr>
<tr><td>Encodec</td><td>0.30</td><td>0.14</td><td>0.08</td><td>0.11</td><td>0.23</td><td>0.37</td><td>0.09</td><td>0.23</td><td>7.22</td><td>5.48</td><td>1.06</td></tr>
<tr><td>DAC</td><td>0.29</td><td>0.14</td><td>0.08</td><td>0.11</td><td>0.23</td><td>0.37</td><td>0.09</td><td>0.23</td><td>12.89</td><td>8.36</td><td>1.68</td></tr>
<tr><td>Wavtokenizer</td><td>0.19</td><td>0.06</td><td>0.02</td><td>0.06</td><td>0.13</td><td>0.06</td><td>0.05</td><td>0.05</td><td>4.39</td><td>11.93</td><td>0.88</td></tr>
<tr><td>ALMTokensizer (Ours)</td><td>0.34</td><td>0.15</td><td>0.07</td><td>0.13</td><td>0.25</td><td>0.44</td><td>0.10</td><td>0.27</td><td>3.55</td><td>4.58</td><td>0.43</td></tr>
", + "image_path": "ef002626f27bb198e207f58573c778f26f9a135585bdd4fb9d3fb8064870efa7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 62, + 257, + 532, + 417 + ], + "blocks": [ + { + "bbox": [ + 52, + 236, + 542, + 257 + ], + "lines": [ + { + "bbox": [ + 52, + 236, + 542, + 257 + ], + "spans": [ + { + "bbox": [ + 52, + 236, + 542, + 257 + ], + "type": "text", + "content": "Table 6. Ablation study of codec framework, training loss, and training strategy. ASR and ER are used to evaluate the semantic information. The others are used to evaluate the reconstruction performance. Experiments conduct on VCTK dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 62, + 257, + 532, + 417 + ], + "lines": [ + { + "bbox": [ + 62, + 257, + 532, + 417 + ], + "spans": [ + { + "bbox": [ + 62, + 257, + 532, + 417 + ], + "type": "table", + "html": "
<tr><td>Setting</td><td>UTMOS (↑)</td><td>DNSMOS (↑)</td><td>VISQOL (↑)</td><td>PESQ (↑)</td><td>STOI (↑)</td><td>ASR (↓)</td><td>ER (↑)</td></tr>
<tr><td>ALMTokensizer</td><td>3.76</td><td>3.64</td><td>3.78</td><td>2.0</td><td>0.81</td><td>18.3</td><td>29.0</td></tr>
<tr><td colspan="8">Framework ablation</td></tr>
<tr><td>w/o the query-based framework</td><td>2.49</td><td>3.13</td><td>3.37</td><td>1.58</td><td>0.77</td><td>34.5</td><td>22.6</td></tr>
<tr><td>w/o Three additional loss</td><td>3.54</td><td>3.41</td><td>3.44</td><td>1.69</td><td>0.78</td><td>27.2</td><td>24.5</td></tr>
<tr><td colspan="8">Training loss ablation</td></tr>
<tr><td>w/o semantic prior for VQ</td><td>3.79</td><td>3.66</td><td>3.78</td><td>2.12</td><td>0.83</td><td>19.2</td><td>28.4</td></tr>
<tr><td>w/o MAE loss</td><td>3.70</td><td>3.76</td><td>3.83</td><td>2.10</td><td>0.82</td><td>24.5</td><td>23.2</td></tr>
<tr><td>w/o AR loss</td><td>3.72</td><td>3.81</td><td>3.80</td><td>2.08</td><td>0.82</td><td>18.8</td><td>30.2</td></tr>
<tr><td colspan="8">Different Patchify module</td></tr>
<tr><td>use Linear-Patchify</td><td>3.47</td><td>3.36</td><td>3.27</td><td>1.78</td><td>0.78</td><td>20.3</td><td>26.7</td></tr>
<tr><td colspan="8">Training strategy ablation</td></tr>
<tr><td>w/o two-stage training</td><td>3.60</td><td>3.39</td><td>3.24</td><td>1.55</td><td>0.74</td><td>22.8</td><td>25.9</td></tr>
", + "image_path": "1867ab2fb144b56a586a497203f019a503a17af3cdbe1769e511357e997096e0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "spans": [ + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "type": "text", + "content": "setting. To validate its effectiveness, we follow previous audio codec models, such as MimiCodec (Défossez et al., 2024). In the encoder part, we use a stride size of [8, 6, 5, 4, 2] to compress 1-second, " + }, + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "type": "text", + "content": " audio into " + }, + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 437, + 291, + 544 + ], + "type": "text", + "content": ", followed by applying 3 RVQ layers to quantize it. As shown in Table 6, using previous audio codec frameworks makes it difficult to maintain good reconstruction performance in very low-bitrate settings. As a result, the proposed query-based compression method is more effective in this setting." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 544, + 290, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 544, + 290, + 640 + ], + "spans": [ + { + "bbox": [ + 52, + 544, + 290, + 640 + ], + "type": "text", + "content": "The Influence of Semantic Prior for VQ To explore the influence of semantic priors on the audio codec model, we conduct an experiment where we remove the semantic prior and instead train a learnable RVQ following Encodec. As shown in Table 6, we find that updating the RVQ layer improves reconstruction performance but reduces semantic information, demonstrating that integrating semantic priors into the VQ layer enhances semantic information." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 641, + 290, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 641, + 290, + 712 + ], + "spans": [ + { + "bbox": [ + 52, + 641, + 290, + 712 + ], + "type": "text", + "content": "The Influence of MAE Loss We also conduct experiments to evaluate the effectiveness of the MAE loss. As shown in Table 6, we find that the MAE loss is crucial for enhancing the semantic information in the codec model. Although the MAE loss has a slight negative effect on reconstruction, it is a crucial factor in building a better audio tokenizer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 437, + 542, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 437, + 542, + 521 + ], + "spans": [ + { + "bbox": [ + 303, + 437, + 542, + 521 + ], + "type": "text", + "content": "The Influence of AR Loss From Table 6, we observe that adding the AR loss reduces reconstruction performance. In Figure 3, we compare token prediction accuracy and TTS performance with and without LM loss. We observe that using LM loss significantly improves token prediction accuracy, particularly for the second and third VQ layers, which shows the effectiveness of our motivation and solution." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 521, + 542, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 521, + 542, + 713 + ], + "spans": [ + { + "bbox": [ + 303, + 521, + 542, + 713 + ], + "type": "text", + "content": "The Influence of Two-stage Training As Table 6 shows, the two-stage training strategy is crucial as it significantly improves reconstruction performance and semantic information in the codec model. The Influence of Patchify Module We investigate two types of Patchify modules: Encode-style and StableCodec-style. As shown in Table 6, using Encode-style Patchify modules yields better performance. One possible reason is that StableCodec-style Patchify modules (Parker et al., 2024) may depend on larger data and model parameters, as the original paper scales their model to 1B. In contrast, we use only four transformer layers to ensure a fair comparison with Encode-style modules. Due to page limitations, we defer the ablation study on the influence of window size " + }, + { + "bbox": [ + 303, + 521, + 542, + 713 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 303, + 521, + 542, + 713 + ], + "type": "text", + "content": " in query-based compression, codebook size, the influence of mask-rate, and model size on reconstruction to Appendix C." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 100, + 517, + 232 + ], + "blocks": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "lines": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "type": "text", + "content": "Table 7. The subjective reconstruction results using MUSHRA (comparative scoring of samples) of codec models on speech, sound and music. Bold for the best result and underline for the second-best result." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 100, + 517, + 232 + ], + "lines": [ + { + "bbox": [ + 79, + 100, + 517, + 232 + ], + "spans": [ + { + "bbox": [ + 79, + 100, + 517, + 232 + ], + "type": "table", + "html": "
<tr><td>Models</td><td>FPS/TPS</td><td>CS/BR</td><td>Speech (↑)</td><td>Sound (↑)</td><td>Music (↑)</td></tr>
<tr><td colspan="6">Speech</td></tr>
<tr><td>MimiCodec (3 RVQ) (Défossez et al., 2024)</td><td>12.5/37.5</td><td>2048/0.41kbps</td><td>65.61 ± 5.2</td><td>-</td><td>-</td></tr>
<tr><td>MimiCodec (8 RVQ) (Défossez et al., 2024)</td><td>12.5/100</td><td>2048/1.1kbps</td><td>86.7 ± 2.3</td><td>-</td><td>-</td></tr>
<tr><td>StableCodec (Parker et al., 2024)</td><td>25/25</td><td>46656/0.4kbps</td><td>81.7 ± 4.4</td><td>-</td><td>-</td></tr>
<tr><td>SpeechTokenizer (Zhang et al., 2023)</td><td>50/150</td><td>1024/1.5bps</td><td>73.7 ± 4.6</td><td>-</td><td>-</td></tr>
<tr><td colspan="6">Audio</td></tr>
<tr><td>Encodec (Défossez et al., 2022)</td><td>50/150</td><td>1024/1.5bps</td><td>75.1 ± 3.9</td><td>77.2 ± 4.2</td><td>73.7 ± 4.6</td></tr>
<tr><td>DAC (Kumar et al., 2023)</td><td>50/150</td><td>1024/1.5bps</td><td>79.3 ± 4.2</td><td>71.3 ± 4.1</td><td>71.3 ± 4.1</td></tr>
<tr><td>Wavtokenizer (Défossez et al., 2022)</td><td>40/40</td><td>4096/0.48bps</td><td>84.0 ± 2.1</td><td>63.1 ± 4.6</td><td>54.1 ± 5.4</td></tr>
<tr><td>Ours</td><td>12.5/37.5</td><td>2048/0.41kbps</td><td>84.8 ± 3.7</td><td>72.4 ± 4.7</td><td>69.0 ± 4.5</td></tr>
", + "image_path": "3c483e95f4e46d9b474b355afbc8f55a9279df8ed331b640ead0025710bc583e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 252, + 118, + 263 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 252, + 118, + 263 + ], + "spans": [ + { + "bbox": [ + 52, + 252, + 118, + 263 + ], + "type": "text", + "content": "4.6. Discussion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 271, + 290, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 271, + 290, + 330 + ], + "spans": [ + { + "bbox": [ + 52, + 271, + 290, + 330 + ], + "type": "text", + "content": "In this section, we discuss two fundamental questions in audio tokenization. Question 1: Is a single quantization layer better than multiple quantization layers? Question 2: Does a low-bit rate with high reconstruction performance define a good audio tokenizer?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "spans": [ + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "type": "text", + "content": "Question 1 Although WavTokenizer and StableCodec demonstrate the potential to build a low-bitrate audio codec tokenizer with a single quantization layer, they rely on a higher frame rate (e.g., 25 or " + }, + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "type": "inline_equation", + "content": "40\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "type": "text", + "content": "). As shown in Figure 1, a lower frame rate (e.g., " + }, + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 331, + 291, + 462 + ], + "type": "text", + "content": ") is critical for improving training efficiency. Thanks to UniAudio (Yang et al., 2023c) and Moshi's (Défossez et al., 2024) audio language model framework, multiple quantization layers do not increase the sequence length. Therefore, multiple quantization layers present an effective approach for building a low-bitrate, semantically rich audio codec." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 463, + 292, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 463, + 292, + 653 + ], + "spans": [ + { + "bbox": [ + 52, + 463, + 292, + 653 + ], + "type": "text", + "content": "Question 2 To address this question, we present two comparisons. First, as shown in Tables 4 and 1, StableCodec exhibits better reconstruction performance and a lower bit-rate compared to WavTokenizer. However, when applied to the text-to-speech generation task, WavTokenizer demonstrates better robustness. One possible reason for this is that StableCodec uses a large-scale codebook size (46,656), which may increase the modeling complexity. Second, although MimiCodec has a higher bit-rate and poorer reconstruction performance than StableCodec, it demonstrates more stable TTS generation performance and better ASR performance. This phenomenon further underscores the importance of semantic information. In summary, a good audio tokenizer for an audio language model should not only consider low-bitrate and reconstruction, but also account for the semantic information in the codec model." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 669, + 126, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 669, + 126, + 681 + ], + "spans": [ + { + "bbox": [ + 52, + 669, + 126, + 681 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 689, + 291, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 689, + 291, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 689, + 291, + 715 + ], + "type": "text", + "content": "In this study, we present a low-bitrate, semantically rich audio codec tokenizer. Specifically, we propose a query-based" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 252, + 543, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 252, + 543, + 361 + ], + "spans": [ + { + "bbox": [ + 303, + 252, + 543, + 361 + ], + "type": "text", + "content": "compression strategy to effectively compress the audio data into a low-bitrate format while incorporating more semantic information. Furthermore, we introduce several training losses to enhance semantic information, including MAE loss and AR loss. Extensive experiments demonstrate the effectiveness of ALMTokensizer. Within the same audio language modeling framework, ALMTokensizer exhibits superior performance in both understanding and generation tasks. We discuss the limitation of this study in Appendix I." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 375, + 400, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 375, + 400, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 375, + 400, + 388 + ], + "type": "text", + "content": "Ethical Statement" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 396, + 544, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 396, + 544, + 479 + ], + "spans": [ + { + "bbox": [ + 303, + 396, + 544, + 479 + ], + "type": "text", + "content": "This paper presents an audio tokenizer for audio language models, which can be applied to various audio generation tasks, such as text-to-speech and text-to-music. There is potential for misuse in generating misinformation, deepfake audio, or other harmful content. We advocate for the development of a detection model to identify audio produced by the codec model and generated by other generative models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 495, + 364, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 364, + 508 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 364, + 508 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 514, + 544, + 717 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 304, + 514, + 544, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 544, + 563 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 544, + 563 + ], + "type": "text", + "content": "Agostinelli, A., Denk, T. I., Borsos, Z., Engel, J., Verzetti, M., Caillon, A., Huang, Q., Jansen, A., Roberts, A., Tagliasacchi, M., et al. Musicl: Generating music from text. arXiv preprint arXiv:2301.11325, 2023." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 569, + 544, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 544, + 619 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 544, + 619 + ], + "type": "text", + "content": "Ai, Y., Jiang, X.-H., Lu, Y.-X., Du, H.-P., and Ling, Z.-H. Apocodec: A neural audio codec with parallel amplitude and phase spectrum encoding and decoding. arXiv preprint arXiv:2402.10533, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 625, + 544, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 625, + 544, + 673 + ], + "spans": [ + { + "bbox": [ + 304, + 625, + 544, + 673 + ], + "type": "text", + "content": "Baevski, A., Zhou, Y., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in neural information processing systems, 33:12449-12460, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 681, + 544, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 681, + 544, + 717 + ], + "spans": [ + { + "bbox": [ + 304, + 681, + 544, + 717 + ], + "type": "text", + "content": "Banerjee, A. and Arora, V. wav2tok: Deep sequence tokenizer for audio retrieval. In The Eleventh International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 718 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "type": "text", + "content": "Bertin-Mahieux, T., Ellis, D. P., Whitman, B., and Lamere, P. The million song dataset. In Proceedings of the 12th International Conference on Music Information Retrieval (ISMIR 2011), 2011." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 122, + 291, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 122, + 291, + 184 + ], + "spans": [ + { + "bbox": [ + 53, + 122, + 291, + 184 + ], + "type": "text", + "content": "Borsos, Z., Marinier, R., Vincent, D., Kharitonov, E., Pietquin, O., Sharifi, M., Roblek, D., Teboul, O., Grangier, D., Tagliasacchi, M., et al. Audiolm: a language modeling approach to audio generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023a." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 190, + 291, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 190, + 291, + 239 + ], + "spans": [ + { + "bbox": [ + 53, + 190, + 291, + 239 + ], + "type": "text", + "content": "Borsos, Z., Sharifi, M., Vincent, D., Kharitonov, E., Zeghidour, N., and Tagliasacchi, M. Soundstorm: Efficient parallel audio generation. arXiv preprint arXiv:2305.09636, 2023b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 246, + 291, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 246, + 291, + 306 + ], + "spans": [ + { + "bbox": [ + 53, + 246, + 291, + 306 + ], + "type": "text", + "content": "Chen, S., Wang, C., Chen, Z., Wu, Y., Liu, S., Chen, Z., Li, J., Kanda, N., Yoshioka, T., Xiao, X., et al. Wavlm: Large-scale self-supervised pre-training for full stack speech processing. IEEE Journal of Selected Topics in Signal Processing, 16(6):1505-1518, 2022a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 314, + 291, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 314, + 291, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 314, + 291, + 350 + ], + "type": "text", + "content": "Chen, S., Wu, Y., Wang, C., Liu, S., Tompkins, D., Chen, Z., and Wei, F. Beats: Audio pre-training with acoustic tokenizers. arXiv preprint arXiv:2212.09058, 2022b." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 358, + 291, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 358, + 291, + 395 + ], + "spans": [ + { + "bbox": [ + 53, + 358, + 291, + 395 + ], + "type": "text", + "content": "Copet, J., Kreuk, F., Gat, I., Remez, T., Kant, D., Synnaeve, G., Adi, Y., and Defossez, A. Simple and controllable music generation. arXiv preprint arXiv:2306.05284, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 402, + 291, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 402, + 291, + 474 + ], + "spans": [ + { + "bbox": [ + 53, + 402, + 291, + 474 + ], + "type": "text", + "content": "Costantini, G., Iaderola, I., Paoloni, A., Todisco, M., et al. Emovo corpus: an italian emotional speech database. In Proceedings of the ninth international conference on language resources and evaluation (LREC'14), pp. 3501-3504. European Language Resources Association (ELRA), 2014." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 482, + 291, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 482, + 291, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 482, + 291, + 517 + ], + "type": "text", + "content": "Défossez, A., Copet, J., Synnaeve, G., and Adi, Y. High fidelity neural audio compression. arXiv preprint arXiv:2210.13438, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 525, + 291, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 525, + 291, + 573 + ], + "spans": [ + { + "bbox": [ + 53, + 525, + 291, + 573 + ], + "type": "text", + "content": "Défossez, A., Mazaré, L., Orsini, M., Royer, A., Pérez, P., Jégou, H., Grave, E., and Zeghidour, N. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 581, + 291, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 581, + 291, + 617 + ], + "spans": [ + { + "bbox": [ + 53, + 581, + 291, + 617 + ], + "type": "text", + "content": "Doh, S., Choi, K., Lee, J., and Nam, J. Lp-musiccaps: Llm-based pseudo music captioning. arXiv preprint arXiv:2307.16372, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 625, + 291, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 625, + 291, + 673 + ], + "spans": [ + { + "bbox": [ + 53, + 625, + 291, + 673 + ], + "type": "text", + "content": "Drossos, K., Lipping, S., and Virtanen, T. Clotho: An audio captioning dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 736-740. IEEE, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 681, + 291, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 291, + 718 + ], + "type": "text", + "content": "Du, Z., Chen, Q., Zhang, S., Hu, K., Lu, H., Yang, Y., Hu, H., Zheng, S., Gu, Y., Ma, Z., et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 542, + 717 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 315, + 67, + 541, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 67, + 541, + 91 + ], + "spans": [ + { + "bbox": [ + 315, + 67, + 541, + 91 + ], + "type": "text", + "content": "based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 99, + 542, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 99, + 542, + 147 + ], + "spans": [ + { + "bbox": [ + 306, + 99, + 542, + 147 + ], + "type": "text", + "content": "Hao, H., Zhou, L., Liu, S., Li, J., Hu, S., Wang, R., and Wei, F. Boosting large language model for speech synthesis: An empirical study. arXiv preprint arXiv:2401.00246, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 156, + 542, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 156, + 542, + 205 + ], + "spans": [ + { + "bbox": [ + 306, + 156, + 542, + 205 + ], + "type": "text", + "content": "He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 16000-16009, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 213, + 542, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 213, + 542, + 249 + ], + "spans": [ + { + "bbox": [ + 306, + 213, + 542, + 249 + ], + "type": "text", + "content": "Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 258, + 542, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 258, + 542, + 329 + ], + "spans": [ + { + "bbox": [ + 306, + 258, + 542, + 329 + ], + "type": "text", + "content": "Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. 
Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3451-3460, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 338, + 542, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 338, + 542, + 386 + ], + "spans": [ + { + "bbox": [ + 306, + 338, + 542, + 386 + ], + "type": "text", + "content": "Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., and Chen, W. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 395, + 542, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 395, + 542, + 443 + ], + "spans": [ + { + "bbox": [ + 306, + 395, + 542, + 443 + ], + "type": "text", + "content": "Huang, P.-Y., Xu, H., Li, J., Baevski, A., Auli, M., Galuba, W., Metze, F., and Feichtenhofer, C. Masked autoencoders that listen. Advances in Neural Information Processing Systems, 35:28708-28720, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 451, + 542, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 451, + 542, + 510 + ], + "spans": [ + { + "bbox": [ + 306, + 451, + 542, + 510 + ], + "type": "text", + "content": "Ji, S., Jiang, Z., Wang, W., Chen, Y., Fang, M., Zuo, J., Yang, Q., Cheng, X., Wang, Z., Li, R., et al. Wavtokenizer: an efficient acoustic discrete codec tokenizer for audio language modeling. arXiv preprint arXiv:2408.16532, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 520, + 542, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 520, + 542, + 568 + ], + "spans": [ + { + "bbox": [ + 306, + 520, + 542, + 568 + ], + "type": "text", + "content": "Ju, Z., Wang, Y., Shen, K., Tan, X., Xin, D., Yang, D., Liu, Y., Leng, Y., Song, K., Tang, S., et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. arXiv preprint arXiv:2403.03100, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 576, + 542, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 576, + 542, + 648 + ], + "spans": [ + { + "bbox": [ + 306, + 576, + 542, + 648 + ], + "type": "text", + "content": "Kang, W., Yang, X., Yao, Z., Kuang, F., Yang, Y., Guo, L., Lin, L., and Povey, D. Libriheavy: a 50,000 hours asr corpus with punctuation casing and context. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 10991-10995. IEEE, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 657, + 542, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 657, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 306, + 657, + 542, + 717 + ], + "type": "text", + "content": "Kharitonov, E., Vincent, D., Borsos, Z., Marinier, R., Girgin, S., Pietquin, O., Sharifi, M., Tagliasacchi, M., and Zeghidour, N. Speak, read and prompt: High-fidelity text-to-speech with minimal supervision. arXiv preprint arXiv:2302.03540, 2023." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 718 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 293, + 140 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 293, + 140 + ], + "type": "text", + "content": "Kim, C. D., Kim, B., Lee, H., and Kim, G. Audiocaps: Generating captions for audios in the wild. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 119-132, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 293, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 293, + 195 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 293, + 195 + ], + "type": "text", + "content": "Kreuk, F., Synnaeve, G., Polyak, A., Singer, U., Défossez, A., Copet, J., Parikh, D., Taigman, Y., and Adi, Y. Audiogen: Textually guided audio generation. arXiv preprint arXiv:2209.15352, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 201, + 292, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 201, + 292, + 262 + ], + "spans": [ + { + "bbox": [ + 53, + 201, + 292, + 262 + ], + "type": "text", + "content": "Kumar, R., Seetharaman, P., Luebs, A., Kumar, I., and Kumar, K. High-fidelity audio compression with improved RVQGAN. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=qjnl1QUUnFA." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 270, + 293, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 293, + 353 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 293, + 353 + ], + "type": "text", + "content": "La Quatra, M., Koudounas, A., Vaiani, L., Baralis, E., Cagliero, L., Garza, P., and Siniscalchi, S. M. Benchmarking representations for speech, music, and acoustic events. In 2024 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICAS-SPW), pp. 505-509, 2024. doi: 10.1109/ICASSPW62465.2024.10625960." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 360, + 292, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 360, + 292, + 409 + ], + "spans": [ + { + "bbox": [ + 53, + 360, + 292, + 409 + ], + "type": "text", + "content": "Li, H., Xue, L., Guo, H., Zhu, X., Lv, Y., Xie, L., Chen, Y., Yin, H., and Li, Z. Single-codec: Single-codebook speech codec towards high-performance speech generation. arXiv preprint arXiv:2406.07422, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 416, + 291, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 416, + 291, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 416, + 291, + 464 + ], + "type": "text", + "content": "Li, J., Li, D., Savarese, S., and Hoi, S. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730-19742. PMLR, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 472, + 291, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 472, + 291, + 507 + ], + "spans": [ + { + "bbox": [ + 53, + 472, + 291, + 507 + ], + "type": "text", + "content": "Lipman, Y., Chen, R. T., Ben-Hamu, H., Nickel, M., and Le, M. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 514, + 292, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 292, + 563 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 292, + 563 + ], + "type": "text", + "content": "Liu, H., Xu, X., Yuan, Y., Wu, M., Wang, W., and Plumbley, M. D. Semanticodec: An ultra low bitrate semantic audio codec for general sound. arXiv preprint arXiv:2405.00233, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 571, + 292, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 571, + 292, + 606 + ], + "spans": [ + { + "bbox": [ + 53, + 571, + 292, + 606 + ], + "type": "text", + "content": "Lostanlen, V. and Cella, C.-E. Deep convolutional networks on the pitch spiral for musical instrument recognition. arXiv preprint arXiv:1605.06644, 2016." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 613, + 292, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 613, + 292, + 673 + ], + "spans": [ + { + "bbox": [ + 53, + 613, + 292, + 673 + ], + "type": "text", + "content": "Mei, X., Meng, C., Liu, H., Kong, Q., Ko, T., Zhao, C., Plumbley, M. D., Zou, Y., and Wang, W. Wavcaps: A chatgpt-assisted weakly-labelled audio captioning dataset for audio-language multimodal research. arXiv preprint arXiv:2303.17395, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 681, + 292, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 292, + 718 + ], + "type": "text", + "content": "Nguyen, T. A., Muller, B., Yu, B., Costa-Jussa, M. R., Elbayad, M., Popuri, S., Ropers, C., Duquenne, P.-A., Algayres, R., Mavlyutov, R., et al. Spirit-lm: Interleaved" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 544, + 718 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 315, + 67, + 543, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 67, + 543, + 102 + ], + "spans": [ + { + "bbox": [ + 315, + 67, + 543, + 102 + ], + "type": "text", + "content": "spoken and written language model. Transactions of the Association for Computational Linguistics, 13:30-52, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 114, + 542, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 114, + 542, + 137 + ], + "spans": [ + { + "bbox": [ + 306, + 114, + 542, + 137 + ], + "type": "text", + "content": "OpenAI. 
Gpt-4 technical report. arXiv preprint arXiv:2204.06125, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 148, + 544, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 148, + 544, + 206 + ], + "spans": [ + { + "bbox": [ + 306, + 148, + 544, + 206 + ], + "type": "text", + "content": "Panayotov, V., Chen, G., Povey, D., and Khudanpur, S. Librispeech: an asr corpus based on public domain audio books. In 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp. 5206-5210. IEEE, 2015." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 217, + 544, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 217, + 544, + 265 + ], + "spans": [ + { + "bbox": [ + 306, + 217, + 544, + 265 + ], + "type": "text", + "content": "Parker, J. D., Smirnov, A., Pons, J., Carr, C., Zukowski, Z., Evans, Z., and Liu, X. Scaling transformers for low-bitrate high-quality speech coding. arXiv preprint arXiv:2411.19842, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 275, + 544, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 275, + 544, + 312 + ], + "spans": [ + { + "bbox": [ + 306, + 275, + 544, + 312 + ], + "type": "text", + "content": "Piczak, K. J. Esc: Dataset for environmental sound classification. In Proceedings of the 23rd ACM international conference on Multimedia, pp. 1015-1018, 2015." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 322, + 544, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 322, + 544, + 357 + ], + "spans": [ + { + "bbox": [ + 306, + 322, + 544, + 357 + ], + "type": "text", + "content": "Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. arXiv preprint arXiv:2012.03411, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 367, + 544, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 367, + 544, + 428 + ], + "spans": [ + { + "bbox": [ + 306, + 367, + 544, + 428 + ], + "type": "text", + "content": "Reddy, C. K., Gopal, V., and Cutler, R. Dnsmos p. 835: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 886-890. IEEE, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 437, + 544, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 437, + 544, + 485 + ], + "spans": [ + { + "bbox": [ + 306, + 437, + 544, + 485 + ], + "type": "text", + "content": "Saeki, T., Xin, D., Nakata, W., Koriyama, T., Takamichi, S., and Saruwatari, H. Utmos: Utokyo-sarulab system for voicemos challenge 2022. arXiv preprint arXiv:2204.02152, 2022." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 495, + 544, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 495, + 544, + 532 + ], + "spans": [ + { + "bbox": [ + 306, + 495, + 544, + 532 + ], + "type": "text", + "content": "Siuzdak, H. Vocos: Closing the gap between time-domain and fourier-based neural vocoders for high-quality audio synthesis. arXiv preprint arXiv:2306.00814, 2023." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 541, + 544, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 541, + 544, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 541, + 544, + 613 + ], + "type": "text", + "content": "Tang, C., Yu, W., Sun, G., Chen, X., Tan, T., Li, W., Lu, L., MA, Z., and Zhang, C. SALMONN: Towards generic hearing abilities for large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=14rn7HpKVk." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 624, + 544, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 624, + 544, + 659 + ], + "spans": [ + { + "bbox": [ + 306, + 624, + 544, + 659 + ], + "type": "text", + "content": "van Niekerk, B., Zäïdi, J., Carbonneau, M.-A., and Kamper, H. Spoken-term discovery using discrete speech units. arXiv preprint arXiv:2408.14390, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 670, + 544, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 544, + 718 + ], + "type": "text", + "content": "Veaux, C., Yamagishi, J., MacDonald, K., et al. Cstr vctk corpus: English multi-speaker corpus for cstr voice cloning toolkit. University of Edinburgh. The Centre for Speech Technology Research (CSTR), 6:15, 2017." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 716 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "type": "text", + "content": "Wang, C., Chen, S., Wu, Y., Zhang, Z., Zhou, L., Liu, S., Chen, Z., Liu, Y., Wang, H., Li, J., et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 291, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 291, + 172 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 291, + 172 + ], + "type": "text", + "content": "Wang, H., Suri, S., Ren, Y., Chen, H., and Shrivastava, A. Larp: Tokenizing videos with a learned autoregressive generative prior. arXiv preprint arXiv:2410.21264, 2024a." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 291, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 291, + 254 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 291, + 254 + ], + "type": "text", + "content": "Wang, Y., Chen, H., Yang, D., Yu, J., Weng, C., Wu, Z., and Meng, H. Consistent and relevant: Rethink the query embedding in general sound separation. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 961-965. IEEE, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 264, + 291, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 264, + 291, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 264, + 291, + 312 + ], + "type": "text", + "content": "Wang, Y., Chen, H., Yang, D., Li, W., Luo, D., Li, G., Yang, S., Wu, Z., Meng, H., and Wu, X. Unisep: Universal target audio separation with language models at scale. arXiv preprint arXiv:2503.23762, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 322, + 291, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 322, + 291, + 358 + ], + "spans": [ + { + "bbox": [ + 53, + 322, + 291, + 358 + ], + "type": "text", + "content": "Wu, H., Kanda, N., Eskimez, S. E., and Li, J. Ts3-codec: Transformer-based simple streaming single codec. arXiv preprint arXiv:2411.18803, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 368, + 291, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 368, + 291, + 416 + ], + "spans": [ + { + "bbox": [ + 53, + 368, + 291, + 416 + ], + "type": "text", + "content": "Yang, D., Liu, S., Huang, R., Lei, G., Weng, C., Meng, H., and Yu, D. Instructts: Modelling expressive tts in discrete latent space with natural language style prompt. arXiv preprint arXiv:2301.13662, 2023a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 426, + 291, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 426, + 291, + 473 + ], + "spans": [ + { + "bbox": [ + 53, + 426, + 291, + 473 + ], + "type": "text", + "content": "Yang, D., Liu, S., Huang, R., Tian, J., Weng, C., and Zou, Y. Hifi-codec: Group-residual vector quantization for high fidelity audio codec. arXiv preprint arXiv:2305.02765, 2023b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 483, + 291, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 483, + 291, + 532 + ], + "spans": [ + { + "bbox": [ + 53, + 483, + 291, + 532 + ], + "type": "text", + "content": "Yang, D., Tian, J., Tan, X., Huang, R., Liu, S., Chang, X., Shi, J., Zhao, S., Bian, J., Wu, X., et al. Uniaudio: An audio foundation model toward universal audio generation. arXiv preprint arXiv:2310.00704, 2023c." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 541, + 291, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 541, + 291, + 590 + ], + "spans": [ + { + "bbox": [ + 53, + 541, + 291, + 590 + ], + "type": "text", + "content": "Yang, D., Yu, J., Wang, H., Wang, W., Weng, C., Zou, Y., and Yu, D. Diffsound: Discrete diffusion model for text-to-sound generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2023d." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 600, + 291, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 600, + 291, + 647 + ], + "spans": [ + { + "bbox": [ + 53, + 600, + 291, + 647 + ], + "type": "text", + "content": "Yang, D., Guo, H., Wang, Y., Huang, R., Li, X., Tan, X., Wu, X., and Meng, H. Uniaudio 1.5: Large language model-driven audio codec is a few-shot audio task learner. arXiv preprint arXiv:2406.10056, 2024a." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "type": "text", + "content": "Yang, D., Huang, R., Wang, Y., Guo, H., Chong, D., Liu, S., Wu, X., and Meng, H. Simplespeech 2: Towards simple and efficient text-to-speech with flow-based scalar latent transformer diffusion models. arXiv preprint arXiv:2408.13893, 2024b." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 542, + 481 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 306, + 67, + 542, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 542, + 115 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 542, + 115 + ], + "type": "text", + "content": "Yang, D., Wang, D., Guo, H., Chen, X., Wu, X., and Meng, H. Simplespeech: Towards simple and efficient text-to-speech with scalar latent transformer diffusion models. arXiv preprint arXiv:2406.02328, 2024c." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 123, + 542, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 123, + 542, + 171 + ], + "spans": [ + { + "bbox": [ + 306, + 123, + 542, + 171 + ], + "type": "text", + "content": "Yang, S.-w., Chi, P.-H., Chuang, Y.-S., Lai, C.-I. J., Lakhotia, K., Lin, Y. Y., Liu, A. T., Shi, J., Chang, X., Lin, G.-T., et al. Superb: Speech processing universal performance benchmark. arXiv preprint arXiv:2105.01051, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 178, + 542, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 178, + 542, + 225 + ], + "spans": [ + { + "bbox": [ + 306, + 178, + 542, + 225 + ], + "type": "text", + "content": "Yu, Q., Weber, M., Deng, X., Shen, X., Cremers, D., and Chen, L.-C. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 235, + 542, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 235, + 542, + 282 + ], + "spans": [ + { + "bbox": [ + 306, + 235, + 542, + 282 + ], + "type": "text", + "content": "Zeghidour, N., Luebs, A., Omran, A., Skoglund, J., and Tagliasacchi, M. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30:495-507, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 290, + 542, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 290, + 542, + 337 + ], + "spans": [ + { + "bbox": [ + 306, + 290, + 542, + 337 + ], + "type": "text", + "content": "Zen, H., Dang, V., Clark, R., Zhang, Y., Weiss, R. J., Jia, Y., Chen, Z., and Wu, Y. Libritts: A corpus derived from librispeech for text-to-speech. arXiv preprint arXiv:1904.02882, 2019." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 346, + 542, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 542, + 394 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 542, + 394 + ], + "type": "text", + "content": "Zeng, A., Du, Z., Liu, M., Wang, K., Jiang, S., Zhao, L., Dong, Y., and Tang, J. Glm-4-voice: Towards intelligent and human-like end-to-end spoken chatbot. arXiv preprint arXiv:2412.02612, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 402, + 542, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 402, + 542, + 437 + ], + "spans": [ + { + "bbox": [ + 306, + 402, + 542, + 437 + ], + "type": "text", + "content": "Zhang, X., Zhang, D., Li, S., Zhou, Y., and Qiu, X. Speechtokenizer: Unified speech tokenizer for speech large language models. arXiv preprint arXiv:2308.16692, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 446, + 542, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 446, + 542, + 481 + ], + "spans": [ + { + "bbox": [ + 306, + 446, + 542, + 481 + ], + "type": "text", + "content": "Zhu, L., Wei, F., Lu, Y., and Chen, D. Scaling the codebook size of vqgan to 100,000 with a utilization rate of " + }, + { + "bbox": [ + 306, + 446, + 542, + 481 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 306, + 446, + 542, + 481 + ], + "type": "text", + "content": ". arXiv preprint arXiv:2406.11837, 2024." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 57 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 95, + 67, + 294, + 304 + ], + "blocks": [ + { + "bbox": [ + 95, + 67, + 294, + 304 + ], + "lines": [ + { + "bbox": [ + 95, + 67, + 294, + 304 + ], + "spans": [ + { + "bbox": [ + 95, + 67, + 294, + 304 + ], + "type": "image", + "image_path": "1c1b8a8c84d828412e41619aab208a4b72ed13470a39bdd8ec78692ea2ef9051.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 322, + 544, + 357 + ], + "lines": [ + { + "bbox": [ + 51, + 322, + 544, + 357 + ], + "spans": [ + { + "bbox": [ + 51, + 322, + 544, + 357 + ], + "type": "text", + "content": "Figure 4. The left diagram illustrates the framework of the audio language model, which includes a pre-trained LLM, a LoRA module, and a depth transformer. The audio language model can process both text and audio streaming inputs and generate corresponding text and audio outputs. The right diagram provides details of hierarchical audio modeling." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 322, + 64, + 504, + 310 + ], + "blocks": [ + { + "bbox": [ + 322, + 64, + 504, + 310 + ], + "lines": [ + { + "bbox": [ + 322, + 64, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 322, + 64, + 504, + 310 + ], + "type": "image", + "image_path": "6c46a09a51c2d1af91d3429bf4ad54706550bf382847ad3d6090b3a56ed70075.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 406, + 315, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 406, + 315, + 420 + ], + "spans": [ + { + "bbox": [ + 52, + 406, + 315, + 420 + ], + "type": "text", + "content": "A. The details of audio language model framework" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 426, + 544, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 426, + 544, + 498 + ], + "spans": [ + { + "bbox": [ + 52, + 426, + 544, + 498 + ], + "type": "text", + "content": "In this section, we provide details of the audio language model. We follow the framework of UniAudio (Yang et al., 2023c) and Moshi (Défossez et al., 2024), which combines a pre-trained LLM with a smaller Transformer model to predict audio tokens in a hierarchical manner. In their original paper, both the LLM and the small Transformer are updated during the training process. Due to resource limitations, and following (Hao et al., 2023), we incorporate LoRA (Hu et al., 2021) into the LLM model. For the LLM model, we use the LLAMA3.2 1B version. During training, we update only the LoRA module and the small Transformer." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "spans": [ + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "type": "text", + "content": "LORA setting For the LoRA module, we add LoRA parameters to the self-attention and linear layers. We set " + }, + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "type": "inline_equation", + "content": "lora_{r} = 32" + }, + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "type": "inline_equation", + "content": "lora_{alpha} = 16" + }, + { + "bbox": [ + 52, + 504, + 543, + 529 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 534, + 543, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 534, + 543, + 557 + ], + "spans": [ + { + "bbox": [ + 52, + 534, + 543, + 557 + ], + "type": "text", + "content": "Depth Transformer setting For the depth transformer, we use 6 self-attention layer. We set the attention head number as 32. The attention dimension is the same as the LLAMA 3.2 1B." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 574, + 528, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 574, + 528, + 587 + ], + "spans": [ + { + "bbox": [ + 52, + 574, + 528, + 587 + ], + "type": "text", + "content": "B. The details of the influence of bitrate and semantic information for audio language model." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 594, + 543, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 594, + 543, + 632 + ], + "spans": [ + { + "bbox": [ + 52, + 594, + 543, + 632 + ], + "type": "text", + "content": "In this section, we provide details of the validation experiments to explore the influence of bitrate and semantic information on audio language models. Following AudioLM (Borsos et al., 2023a), we construct an audio token pre-training task similar to text pre-training, where the model is tasked with predicting the next audio token based on the previous token sequence." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 643, + 134, + 655 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 643, + 134, + 655 + ], + "spans": [ + { + "bbox": [ + 53, + 643, + 134, + 655 + ], + "type": "text", + "content": "B.1. Training data" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 662, + 523, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 662, + 523, + 674 + ], + "spans": [ + { + "bbox": [ + 52, + 662, + 523, + 674 + ], + "type": "text", + "content": "We conduct the experiments on 2000 hours speech data, these data is selected from MLS dataset (Pratap et al., 2020)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 687, + 114, + 698 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 687, + 114, + 698 + ], + "spans": [ + { + "bbox": [ + 53, + 687, + 114, + 698 + ], + "type": "text", + "content": "B.2. Test data" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 705, + 223, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 705, + 223, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 705, + 223, + 717 + ], + "type": "text", + "content": "We evaluate on LibriSpeech test clean set." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 99, + 84, + 496, + 137 + ], + "blocks": [ + { + "bbox": [ + 145, + 74, + 449, + 84 + ], + "lines": [ + { + "bbox": [ + 145, + 74, + 449, + 84 + ], + "spans": [ + { + "bbox": [ + 145, + 74, + 449, + 84 + ], + "type": "text", + "content": "Table 8. The reconstruction performance of different frame rate of audio tokenizers." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 99, + 84, + 496, + 137 + ], + "lines": [ + { + "bbox": [ + 99, + 84, + 496, + 137 + ], + "spans": [ + { + "bbox": [ + 99, + 84, + 496, + 137 + ], + "type": "table", + "html": "
VersionBitrate (↓)FPS (↓)codebook sizePESQ (↑)UT-MOS (↑)VISQOL (↑)STOI (↑)
50hz1650bps5020482.223.693.630.86
25hz825bps2520482.073.563.610.83
12.5hz412.5bps12.520481.582.493.370.77
", + "image_path": "ebeb2a8c7b24539dd827c8b2160b82d162ceb27058d6262bf860960f2abb7cb7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 232, + 126, + 242 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 232, + 126, + 242 + ], + "spans": [ + { + "bbox": [ + 52, + 232, + 126, + 242 + ], + "type": "text", + "content": "B.3. Framework" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 251, + 476, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 251, + 476, + 262 + ], + "spans": [ + { + "bbox": [ + 52, + 251, + 476, + 262 + ], + "type": "text", + "content": "We use the same framework as described in Section A; the difference is that we do not use text streaming." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 275, + 214, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 275, + 214, + 288 + ], + "spans": [ + { + "bbox": [ + 52, + 275, + 214, + 288 + ], + "type": "text", + "content": "B.4. Three Types of Audio Tokenizers" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 294, + 543, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 294, + 543, + 319 + ], + "spans": [ + { + "bbox": [ + 52, + 294, + 543, + 319 + ], + "type": "text", + "content": "Following the structure of MimiCodec (Défossez et al., 2024), we train three versions of the audio codec tokenizer. All of the audio codec models are trained on " + }, + { + "bbox": [ + 52, + 294, + 543, + 319 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 52, + 294, + 543, + 319 + ], + "type": "text", + "content": " speech data. We train three versions of the audio codec models, as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 323, + 542, + 409 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 52, + 323, + 542, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 323, + 542, + 348 + ], + "spans": [ + { + "bbox": [ + 52, + 323, + 542, + 348 + ], + "type": "text", + "content": "(V1) We set the down-sampling rate to [2, 5, 6, 8], resulting in a " + }, + { + "bbox": [ + 52, + 323, + 542, + 348 + ], + "type": "inline_equation", + "content": "50\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 323, + 542, + 348 + ], + "type": "text", + "content": " frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 1.65 kbps." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 354, + 541, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 354, + 541, + 379 + ], + "spans": [ + { + "bbox": [ + 52, + 354, + 541, + 379 + ], + "type": "text", + "content": "(V2) We set the down-sampling rate to [4, 5, 6, 8], resulting in a " + }, + { + "bbox": [ + 52, + 354, + 541, + 379 + ], + "type": "inline_equation", + "content": "25\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 354, + 541, + 379 + ], + "type": "text", + "content": " frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 825 bps." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 383, + 541, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 383, + 541, + 409 + ], + "spans": [ + { + "bbox": [ + 52, + 383, + 541, + 409 + ], + "type": "text", + "content": "(V3) We set the down-sampling rate to [2, 4, 5, 6, 8], resulting in a " + }, + { + "bbox": [ + 52, + 383, + 541, + 409 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 383, + 541, + 409 + ], + "type": "text", + "content": " frame rate. We use three RVQ layers, and the codebook size is 2,048. The bitrate of this audio codec is 412.5 bps." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 413, + 543, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 413, + 543, + 450 + ], + "spans": [ + { + "bbox": [ + 52, + 413, + 543, + 450 + ], + "type": "text", + "content": "Note that the original MimiCodec is trained with distillation loss from WavLM; we do not add this loss during the training of our audio tokenizer. Therefore, these three audio tokenizers do not include any semantic information. Table 8 shows the reconstruction performance of the three audio tokenizers." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 463, + 159, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 463, + 159, + 474 + ], + "spans": [ + { + "bbox": [ + 52, + 463, + 159, + 474 + ], + "type": "text", + "content": "B.5. Semantic Tokenizer" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 481, + 543, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 481, + 543, + 540 + ], + "spans": [ + { + "bbox": [ + 52, + 481, + 543, + 540 + ], + "type": "text", + "content": "The previous three audio codec tokenizers do not consider semantic information. To evaluate the importance of semantic information, we follow WhisperSpeech5 to build a Whisper-based semantic tokenizer. Specifically, we follow the training code of WhisperSpeech, using two down-sampling layers to compress the Whisper encoder's features into a " + }, + { + "bbox": [ + 52, + 481, + 543, + 540 + ], + "type": "inline_equation", + "content": "12.5\\mathrm{Hz}" + }, + { + "bbox": [ + 52, + 481, + 543, + 540 + ], + "type": "text", + "content": " frame rate, and then we add three RVQ layers to quantize them. Thus, this semantic tokenizer has the same bitrate as the V3 audio tokenizer." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 554, + 156, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 554, + 156, + 565 + ], + "spans": [ + { + "bbox": [ + 52, + 554, + 156, + 565 + ], + "type": "text", + "content": "B.6. 
Evaluation metrics" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 573, + 331, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 573, + 331, + 586 + ], + "spans": [ + { + "bbox": [ + 52, + 573, + 331, + 586 + ], + "type": "text", + "content": "We evaluate the pre-training performance from the following aspects:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "spans": [ + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "type": "text", + "content": "Training efficiency: As is well known, the space complexity of a transformer is " + }, + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "type": "inline_equation", + "content": "O(T^2)" + }, + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 52, + 590, + 543, + 628 + ], + "type": "text", + "content": " is the sequence length. A low-bitrate audio tokenizer can compress the audio signal into a few token sequences, thereby improving training efficiency. For all experiments, we use the same GPU machine to train the model and record the statistical training duration." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 632, + 543, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 632, + 543, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 632, + 543, + 669 + ], + "type": "text", + "content": "Inference efficiency: Similarly, a low-bitrate audio tokenizer can improve inference efficiency, as it requires fewer inference steps. We use the Real-Time Factor (RTF) to assess inference efficiency. Note that for all experiments, we do not use any inference optimization tricks, such as KV cache." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 673, + 543, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 673, + 543, + 699 + ], + "spans": [ + { + "bbox": [ + 52, + 673, + 543, + 699 + ], + "type": "text", + "content": "Validation loss and perplexity: Following text LLMs (OpenAI, 2023), we use validation loss and perplexity to evaluate model performance." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 64, + 705, + 254, + 718 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 705, + 254, + 718 + ], + "spans": [ + { + "bbox": [ + 64, + 705, + 254, + 718 + ], + "type": "text", + "content": "5https://github.com/WhisperSpeech/WhisperSpeech" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 167, + 69, + 430, + 281 + ], + "blocks": [ + { + "bbox": [ + 167, + 69, + 430, + 281 + ], + "lines": [ + { + "bbox": [ + 167, + 69, + 430, + 281 + ], + "spans": [ + { + "bbox": [ + 167, + 69, + 430, + 281 + ], + "type": "image", + "image_path": "d94fab77d52195e7058b5d482d8f7f1f1f1533b9c9ca7a8d3dd5363564b5f2ed.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 297, + 451, + 309 + ], + "lines": [ + { + "bbox": [ + 144, + 297, + 451, + 309 + ], + "spans": [ + { + "bbox": [ + 144, + 297, + 451, + 309 + ], + "type": "text", + "content": "Figure 5. The performance comparison with different window size during inference." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 86, + 346, + 509, + 398 + ], + "blocks": [ + { + "bbox": [ + 166, + 335, + 428, + 346 + ], + "lines": [ + { + "bbox": [ + 166, + 335, + 428, + 346 + ], + "spans": [ + { + "bbox": [ + 166, + 335, + 428, + 346 + ], + "type": "text", + "content": "Table 9. The influence of codebook size for reconstruction performance." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 86, + 346, + 509, + 398 + ], + "lines": [ + { + "bbox": [ + 86, + 346, + 509, + 398 + ], + "spans": [ + { + "bbox": [ + 86, + 346, + 509, + 398 + ], + "type": "table", + "html": "
Codebook SizePESQ (↑)UT-MOS (↑)VISQOL (↑)STOI (↑)STFT loss (↓)Token utilization (↑)
20482.03.763.780.811.20100%
10241.833.663.650.801.14100%
5121.693.643.580.7921.18100%
", + "image_path": "17f3ab99fd1a09e4e9beee635e4fd0043664ff45bf148bb33396b5a7d9c15ef7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 423, + 146, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 423, + 146, + 437 + ], + "spans": [ + { + "bbox": [ + 52, + 423, + 146, + 437 + ], + "type": "text", + "content": "C. Ablation study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 444, + 281, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 444, + 281, + 455 + ], + "spans": [ + { + "bbox": [ + 52, + 444, + 281, + 455 + ], + "type": "text", + "content": "C.1. The influence of window size for ALMTokenizer" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 462, + 543, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 462, + 543, + 522 + ], + "spans": [ + { + "bbox": [ + 52, + 462, + 543, + 522 + ], + "type": "text", + "content": "As discussed in the previous section, the proposed ALMTokensizer supports a dynamic compression rate by changing the window size " + }, + { + "bbox": [ + 52, + 462, + 543, + 522 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 52, + 462, + 543, + 522 + ], + "type": "text", + "content": ". Figure 5 shows the comparison of reconstruction performance with different window sizes. We observe that using a smaller window size results in better reconstruction performance, but it also increases the bitrate. For example, if the window size is 2, the bitrate is 1237.5bps, window size is 6, the bitrate is 412.5. It also shows the advantages of proposed method: we can dynamically change the frame rate during the inference by setting different window size." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 536, + 205, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 205, + 547 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 205, + 547 + ], + "type": "text", + "content": "C.2. The influence of codebook size" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 554, + 544, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 554, + 544, + 590 + ], + "spans": [ + { + "bbox": [ + 52, + 554, + 544, + 590 + ], + "type": "text", + "content": "We explore three different codebook sizes: 512, 1024, and 2048. To align with the setting of MimiCodec (Défossez et al., 2024), we set the max codebook size as 2048. The results, as shown in Table 9, are presented. We observe that scaling the codebook size improves reconstruction performance. Furthermore, we also find that almost all tokens have been used." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 603, + 326, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 603, + 326, + 615 + ], + "spans": [ + { + "bbox": [ + 52, + 603, + 326, + 615 + ], + "type": "text", + "content": "C.3. The influence of model size for reconstruction performance" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 622, + 544, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 622, + 544, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 622, + 544, + 717 + ], + "type": "text", + "content": "To explore the influence of model size on reconstruction performance, we set up two configurations: (1) We use 24 self-attention layers for both the transformer encoder and transformer decoder, resulting in 174M parameters. 
(2) We use 12 self-attention layers for both the transformer encoder and transformer decoder, resulting in 87M parameters. In both settings, we keep the Patchify module the same size, as it consists of several convolutional layers, and its total parameters are small. The experimental results, as shown in Table 10, indicate that using a larger model can improve reconstruction but also increases computational resource consumption (higher RTF). Previous work, StableCodec (Parker et al., 2024), shows that scaling the codec model to 1B parameters can lead to better performance. Due to computational resource limitations, we leave scaling to a larger model size for future work." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 84, + 491, + 127 + ], + "blocks": [ + { + "bbox": [ + 178, + 74, + 416, + 84 + ], + "lines": [ + { + "bbox": [ + 178, + 74, + 416, + 84 + ], + "spans": [ + { + "bbox": [ + 178, + 74, + 416, + 84 + ], + "type": "text", + "content": "Table 10. The influence of model for reconstruction performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 84, + 491, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 84, + 491, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 491, + 127 + ], + "type": "table", + "html": "
SettingPESQ (↑)UT-MOS (↑)VISQOL (↑)STOI (↑)Model size (↓)RTF (↓)
24 attention layer2.03.763.780.811740.031
12 attention layer1.873.573.700.79870.019
", + "image_path": "65e54bea32de06f51d9c5ce9da1e1a2189806386fd5b23c8a2f36505d83d130b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 145, + 244, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 145, + 244, + 156 + ], + "spans": [ + { + "bbox": [ + 52, + 145, + 244, + 156 + ], + "type": "text", + "content": "C.4. The influence of mask-rate in MAE loss" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 163, + 543, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 163, + 543, + 200 + ], + "spans": [ + { + "bbox": [ + 51, + 163, + 543, + 200 + ], + "type": "text", + "content": "Inspired by MAE(He et al., 2022), we tested three groups of mask rates ranges: (10–20%), (20–30%), and (30–40%). The experiments as following Table shows. Results indicate that higher rates (30–40%) benefit semantics but harm reconstruction, leading us to adopt an intermediate range (20–30%)." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 118, + 227, + 476, + 278 + ], + "blocks": [ + { + "bbox": [ + 203, + 218, + 391, + 227 + ], + "lines": [ + { + "bbox": [ + 203, + 218, + 391, + 227 + ], + "spans": [ + { + "bbox": [ + 203, + 218, + 391, + 227 + ], + "type": "text", + "content": "Table 11. The influence of mask-rate for MAE loss." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 118, + 227, + 476, + 278 + ], + "lines": [ + { + "bbox": [ + 118, + 227, + 476, + 278 + ], + "spans": [ + { + "bbox": [ + 118, + 227, + 476, + 278 + ], + "type": "table", + "html": "
mask rate rangeUTMOSDNSMOSVISQOLPESQSTOIASRER
10-20%3.773.623.802.00.8118.727.7
20-30%3.763.643.782.00.8118.329.0
30-40%3.363.063.311.580.7718.129.6
", + "image_path": "105b727952b5ca74e95cd96f0079e5c39919e423b44aa967df89a083ef0b942e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 304, + 126, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 304, + 126, + 316 + ], + "spans": [ + { + "bbox": [ + 52, + 304, + 126, + 316 + ], + "type": "text", + "content": "D. Evaluation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 325, + 542, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 325, + 542, + 349 + ], + "spans": [ + { + "bbox": [ + 52, + 325, + 542, + 349 + ], + "type": "text", + "content": "We evaluate the performance of the previous SOTA audio tokenizers and our proposed ALMTokensizer across audio reconstruction, audio semantic information, audio understanding, and audio generation tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 361, + 168, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 361, + 168, + 372 + ], + "spans": [ + { + "bbox": [ + 52, + 361, + 168, + 372 + ], + "type": "text", + "content": "D.1. Audio Reconstruction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 380, + 542, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 380, + 542, + 404 + ], + "spans": [ + { + "bbox": [ + 52, + 380, + 542, + 404 + ], + "type": "text", + "content": "For speech data, we use DNS-MOS (Reddy et al., 2022), UT-MOS (Saeki et al., 2022), PESQ, STOI (Short-Time Objective Intelligibility), VISQOL (speech version), and STFT loss as metrics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 411, + 543, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 411, + 543, + 458 + ], + "spans": [ + { + "bbox": [ + 52, + 411, + 543, + 458 + ], + "type": "text", + "content": "For sound and music data, we use VISQOL (audio version), STFT loss, and Mel loss. Furthermore, following (Kumar et al., 2023), we conduct the MUSHRA subjective test for speech, sound, and music. Specifically, we hire 10 audio-related researchers to conduct the MOS evaluation. We ask the listeners to rate each audio, with scores ranging from 0 to 100. Refer to D.5 for the details." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 464, + 543, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 464, + 543, + 513 + ], + "spans": [ + { + "bbox": [ + 52, + 464, + 543, + 513 + ], + "type": "text", + "content": "Evaluation Datasets: For speech data, we evaluate on a subset of VCTK (Veaux et al., 2017) (200 speech utterances) and a subset of the LibriTTS test clean set (Zen et al., 2019) (400 speech utterances). For sound data, we evaluate on a subset of the AudioCaps validation set (Kim et al., 2019) (200 sound utterances). For music data, we evaluate on a subset of the MusicCaps (Agostinelli et al., 2023) dataset (200 music utterances)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 525, + 197, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 525, + 197, + 536 + ], + "spans": [ + { + "bbox": [ + 52, + 525, + 197, + 536 + ], + "type": "text", + "content": "D.2. 
Audio Semantic Information" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 544, + 543, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 544, + 543, + 605 + ], + "spans": [ + { + "bbox": [ + 52, + 544, + 543, + 605 + ], + "type": "text", + "content": "Previous SSL models, such as Hubert (Hsu et al., 2021) and WavLM (Chen et al., 2022a), have shown that semantic-rich representations can be used to solve downstream recognition tasks by fine-tuning several adaptor layers. Inspired by these works, we propose evaluating the performance of the audio tokenizer for downstream recognition tasks. We use the quantized features of the audio tokenizer as the input for downstream tasks. We follow two popular benchmarks: SUPERB (Yang et al., 2021) and ARCH (La Quatra et al., 2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 609, + 543, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 609, + 543, + 658 + ], + "spans": [ + { + "bbox": [ + 52, + 609, + 543, + 658 + ], + "type": "text", + "content": "For speech data, we conduct the automatic speech recognition (ASR) task on the LibriSpeech (Panayotov et al., 2015) dataset and the emotion classification (EC) task on the EMOVO (Costantini et al., 2014) dataset. For the ASR task, we train on the LibriSpeech train-100 set and evaluate on the LibriSpeech test clean set. For the EC task, we follow ARCH (La Quatra et al., 2024) to split the training and test sets." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 663, + 543, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 663, + 543, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 663, + 543, + 700 + ], + "type": "text", + "content": "For sound data, we conduct the sound classification task on the ESC-50 dataset (Piczak, 2015). For music data, we conduct the music classification task on the Medley-Solos-DB dataset (Lostanlen & Cella, 2016). For both tasks, we follow the ARCH benchmarking settings to split the training and test sets." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 705, + 543, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 705, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 705, + 543, + 718 + ], + "type": "text", + "content": "For all experiments, we train for 10 epochs with the same learning rate and batch size. For the automatic speech recognition" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 510, + 79 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 510, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 510, + 79 + ], + "type": "text", + "content": "task, we use word error rate (WER) as the metric. 
For the other classification tasks, we use accuracy as the metric." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 92, + 214, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 92, + 214, + 105 + ], + "spans": [ + { + "bbox": [ + 52, + 92, + 214, + 105 + ], + "type": "text", + "content": "D.3. LM-based Audio Understanding" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 111, + 543, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 111, + 543, + 171 + ], + "spans": [ + { + "bbox": [ + 52, + 111, + 543, + 171 + ], + "type": "text", + "content": "Overview To further validate whether the audio tokenizer is suitable for building an audio language model, we propose conducting an audio understanding task using discrete tokens as input. We conduct three tasks: automatic speech recognition (ASR), audio captioning, and music captioning. We use the framework introduced in Section A. For audio data, we use the audio tokenizer to encode it as discrete tokens; for text data, we use the BPE tokenizer of LLAMA 3.2. We construct the sequence as [audio token, text token], then the model is asked to predict the text token based on the previous audio token." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 177, + 543, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 177, + 543, + 249 + ], + "spans": [ + { + "bbox": [ + 52, + 177, + 543, + 249 + ], + "type": "text", + "content": "Training Data For the ASR task, we select 2,000 hours of LibriHeavy speech data (Kang et al., 2024). For the audio captioning tasks, we use AudioCaps (Kim et al., 2019) and BBC sound effects (Mei et al., 2023). For the BBC sound effects, we cut off the first 10 seconds of audio if the utterance duration is greater than 10 seconds. Finally, we obtain about 500 hours of sound data. For the music captioning task, we use a subset of the Million Song dataset. We cut off the first 10 seconds of music data for each utterance, which results in about 500 hours of music data. For the corresponding captions, we use LPMusicCaps (Doh et al., 2023)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 255, + 542, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 255, + 542, + 291 + ], + "spans": [ + { + "bbox": [ + 52, + 255, + 542, + 291 + ], + "type": "text", + "content": "Test Data For the ASR task, we evaluate on the LibriSpeech test clean set. For the audio captioning task, we evaluate on the AudioCaps dataset (Kim et al., 2019). For the music captioning task, we evaluate on the MusicCaps dataset (Agostinelli et al., 2023)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 297, + 543, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 297, + 543, + 321 + ], + "spans": [ + { + "bbox": [ + 52, + 297, + 543, + 321 + ], + "type": "text", + "content": "Metrics Similarly, we use WER as the evaluation metric for the ASR task. For audio and music captioning, we follow (Drossos et al., 2020) and adopt BLEU-1, BLEU-2, BLEU-3, METEOR, ROUGE-L, CIDEr-D, SPICE, and SPIDER metrics." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 326, + 502, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 326, + 502, + 339 + ], + "spans": [ + { + "bbox": [ + 52, + 326, + 502, + 339 + ], + "type": "text", + "content": "Inference Setting For inference, we directly use the top-k sampling strategy and set " + }, + { + "bbox": [ + 52, + 326, + 502, + 339 + ], + "type": "inline_equation", + "content": "k = 30" + }, + { + "bbox": [ + 52, + 326, + 502, + 339 + ], + "type": "text", + "content": " for all experiments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 351, + 198, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 351, + 198, + 363 + ], + "spans": [ + { + "bbox": [ + 52, + 351, + 198, + 363 + ], + "type": "text", + "content": "D.4. LM-based Audio Generation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 370, + 543, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 370, + 543, + 406 + ], + "spans": [ + { + "bbox": [ + 52, + 370, + 543, + 406 + ], + "type": "text", + "content": "We also perform audio generation tasks, including text-to-speech, text-to-sound, and text-to-music generation. Similarly, we construct the sequence as [text token, audio token], then the model is asked to predict the audio token based on the previous text token." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 412, + 443, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 412, + 443, + 425 + ], + "spans": [ + { + "bbox": [ + 52, + 412, + 443, + 425 + ], + "type": "text", + "content": "Training and Test Data We use the same training and test data as the audio comprehension task." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 430, + 543, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 430, + 543, + 466 + ], + "spans": [ + { + "bbox": [ + 52, + 430, + 543, + 466 + ], + "type": "text", + "content": "Metrics For TTS evaluation, we use WER to evaluate robustness, and UTMOS and DNSMOS are used to assess speech quality. For text-to-sound and text-to-music, we follow previous works AudioGen (Kreuk et al., 2022), using Fréchet Audio Distance (FAD), Kullback-Leibler (KL) Divergence, and Fréchet Distance (FD) for audio fidelity and similarity." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 472, + 522, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 472, + 522, + 484 + ], + "spans": [ + { + "bbox": [ + 52, + 472, + 522, + 484 + ], + "type": "text", + "content": "Inference Setting During the inference stage, we use the top-k sampling strategy and set " + }, + { + "bbox": [ + 52, + 472, + 522, + 484 + ], + "type": "inline_equation", + "content": "k = 30" + }, + { + "bbox": [ + 52, + 472, + 522, + 484 + ], + "type": "text", + "content": " for all experiments." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 497, + 173, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 497, + 173, + 509 + ], + "spans": [ + { + "bbox": [ + 52, + 497, + 173, + 509 + ], + "type": "text", + "content": "D.5. 
Subjective Evaluations" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 516, + 543, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 516, + 543, + 599 + ], + "spans": [ + { + "bbox": [ + 52, + 516, + 543, + 599 + ], + "type": "text", + "content": "For the subjective evaluations, we adopt the approach used in previous works (Kumar et al., 2023; Parker et al., 2024) and use the MUSHRA format without a hidden anchor. Listeners are asked to compare multiple versions of an example simultaneously, including both a labeled reference and a hidden reference. They are given the following instructions: \"Please assess the quality similarity between an audio sample and its reference. Listen carefully to the reference audio, then rate the quality of each test clip in comparison. A score of 0 indicates no resemblance to the reference, while a score of 100 means it is identical to the reference.\" We randomly select 10 samples from each category (speech, music, and sound) in the test set, ensuring that each sample receives 10 ratings." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 614, + 204, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 614, + 204, + 627 + ], + "spans": [ + { + "bbox": [ + 52, + 614, + 204, + 627 + ], + "type": "text", + "content": "E. Audio Tokenizer Baselines" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 635, + 543, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 635, + 543, + 659 + ], + "spans": [ + { + "bbox": [ + 52, + 635, + 543, + 659 + ], + "type": "text", + "content": "To make a fair comparison, we classify the audio tokenizers into two types: (1) speech-based tokenizers, which are trained on speech datasets, and (2) audio-based tokenizers, which are trained on speech, sound, and music datasets." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 672, + 149, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 672, + 149, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 672, + 149, + 684 + ], + "type": "text", + "content": "E.1. Speech Tokenizer" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 691, + 194, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 691, + 194, + 704 + ], + "spans": [ + { + "bbox": [ + 52, + 691, + 194, + 704 + ], + "type": "text", + "content": "For speech data, we compare with:" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 85, + 538, + 178 + ], + "blocks": [ + { + "bbox": [ + 53, + 74, + 512, + 84 + ], + "lines": [ + { + "bbox": [ + 53, + 74, + 512, + 84 + ], + "spans": [ + { + "bbox": [ + 53, + 74, + 512, + 84 + ], + "type": "text", + "content": "Table 12. 
The performance comparison on LibriTTS test clean. Bold for the best result and underline for the second-best result." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 85, + 538, + 178 + ], + "lines": [ + { + "bbox": [ + 56, + 85, + 538, + 178 + ], + "spans": [ + { + "bbox": [ + 56, + 85, + 538, + 178 + ], + "type": "table", + "html": "
ModelsFPS/TPSCS/BRReconstructionEfficiency
UTMOS (↑)DNS-MOS (↑)VISQOL (↑)STOI (↑)PESQ (↑)Model size (M) (↓)RTF (↓)
Encodec50/4001024/6kbps3.303.763.950.942.72140.019
Encodec50/1501024/1.5kbps2.023.273.830.881.79140.019
DAC50/1501024/1.5kbps2.613.363.850.891.96710.026
Wavtokenizer40/404096/0.48kbps3.653.613.800.871.81770.017
StableCodec25/2546656/0.4kbps4.203.743.510.881.859500.039
MimiCodec (3 RVQ)12.5/37.52048/0.41kbps2.823.283.340.831.4075.60.023
ALMTokensizer (Ours)12.5/37.52048/0.41kbps3.683.643.900.901.921740.031
", + "image_path": "847af1e78f02e93f03fa836136aadb7e2bfa7fe3f787b38cbddd0915c8294463.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 211, + 543, + 355 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 51, + 211, + 542, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 211, + 542, + 235 + ], + "spans": [ + { + "bbox": [ + 51, + 211, + 542, + 235 + ], + "type": "text", + "content": "(1) Encodec (Defossez et al., 2022), a SOTA audio codec model trained on large-scale speech, sound, and music datasets. The official open-sourced " + }, + { + "bbox": [ + 51, + 211, + 542, + 235 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 51, + 211, + 542, + 235 + ], + "type": "text", + "content": " version is used." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 241, + 543, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 241, + 543, + 266 + ], + "spans": [ + { + "bbox": [ + 51, + 241, + 543, + 266 + ], + "type": "text", + "content": "(2) DAC-Codec (Kumar et al., 2023), which offers very high reconstruction performance. It is trained on large-scale speech, sound, and music datasets. The official open-sourced " + }, + { + "bbox": [ + 51, + 241, + 543, + 266 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 51, + 241, + 543, + 266 + ], + "type": "text", + "content": " version is used." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 271, + 541, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 271, + 541, + 295 + ], + "spans": [ + { + "bbox": [ + 51, + 271, + 541, + 295 + ], + "type": "text", + "content": "(3) MimiCodec (Défossez et al., 2024), a SOTA low-bitrate speech codec model trained on a large-scale speech dataset. The sampling rate is " + }, + { + "bbox": [ + 51, + 271, + 541, + 295 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 51, + 271, + 541, + 295 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 301, + 541, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 301, + 541, + 326 + ], + "spans": [ + { + "bbox": [ + 51, + 301, + 541, + 326 + ], + "type": "text", + "content": "(4) SpeechTokenizer (Zhang et al., 2023), a semantic-rich speech codec model trained on a large-scale speech dataset. The sampling rate is " + }, + { + "bbox": [ + 51, + 301, + 541, + 326 + ], + "type": "inline_equation", + "content": "16\\mathrm{kHz}" + }, + { + "bbox": [ + 51, + 301, + 541, + 326 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 331, + 541, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 331, + 541, + 355 + ], + "spans": [ + { + "bbox": [ + 51, + 331, + 541, + 355 + ], + "type": "text", + "content": "(5) WavTokenizer (Ji et al., 2024), an audio codec tokenizer trained on large-scale speech, sound, and music datasets. The sampling rate is " + }, + { + "bbox": [ + 51, + 331, + 541, + 355 + ], + "type": "inline_equation", + "content": "24\\mathrm{kHz}" + }, + { + "bbox": [ + 51, + 331, + 541, + 355 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 51, + 361, + 541, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 361, + 541, + 384 + ], + "spans": [ + { + "bbox": [ + 51, + 361, + 541, + 384 + ], + "type": "text", + "content": "To make a fair comparison, for Encodec, DAC-Codec, and SpeechTokenizer, we use the first three RVQ layers to control the bitrate during inference." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 398, + 145, + 409 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 398, + 145, + 409 + ], + "spans": [ + { + "bbox": [ + 52, + 398, + 145, + 409 + ], + "type": "text", + "content": "E.2. Audio Tokenizer" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 416, + 542, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 416, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 51, + 416, + 542, + 441 + ], + "type": "text", + "content": "For sound and music data, we compare with Encodec, DAC-Codec, and WavTokenizer. These three models are trained on large-scale speech, sound, and music datasets." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 453, + 148, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 453, + 148, + 464 + ], + "spans": [ + { + "bbox": [ + 52, + 453, + 148, + 464 + ], + "type": "text", + "content": "E.3. Semantic Models" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 472, + 543, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 472, + 543, + 509 + ], + "spans": [ + { + "bbox": [ + 51, + 472, + 543, + 509 + ], + "type": "text", + "content": "Furthermore, to evaluate the performance of semantic information, we also introduce several SSL-based models. For speech, we use WavLM (Chen et al., 2022a) and HuBERT (Hsu et al., 2021). For sound and music, we use BEATs (Chen et al., 2022b) and Wav2Vec2-AudioSet " + }, + { + "bbox": [ + 51, + 472, + 543, + 509 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 51, + 472, + 543, + 509 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 524, + 299, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 524, + 299, + 537 + ], + "spans": [ + { + "bbox": [ + 52, + 524, + 299, + 537 + ], + "type": "text", + "content": "F. More audio tokenizer evaluation experiments" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 544, + 265, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 544, + 265, + 556 + ], + "spans": [ + { + "bbox": [ + 52, + 544, + 265, + 556 + ], + "type": "text", + "content": "F.1. The subjective evaluation for audio tokenizer" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 563, + 321, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 563, + 321, + 574 + ], + "spans": [ + { + "bbox": [ + 52, + 563, + 321, + 574 + ], + "type": "text", + "content": "Table 7 shows the subjective evaluation results for audio tokenizer." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 588, + 249, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 588, + 249, + 599 + ], + "spans": [ + { + "bbox": [ + 52, + 588, + 249, + 599 + ], + "type": "text", + "content": "F.2. 
Evaluation results on LibriTTS test clean" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 607, + 542, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 607, + 542, + 642 + ], + "spans": [ + { + "bbox": [ + 51, + 607, + 542, + 642 + ], + "type": "text", + "content": "We report the reconstruction performance evaluated on a subset of the LibriTTS test clean set, where we randomly select 400 speech utterances. Additionally, we calculate the Real-Time Factor (RTF) and model size to assess efficiency. For RTF evaluation, we use an NVIDIA A100 GPU to evaluate all models." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 656, + 167, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 656, + 167, + 667 + ], + "spans": [ + { + "bbox": [ + 52, + 656, + 167, + 667 + ], + "type": "text", + "content": "F.3. Length generalization" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 674, + 542, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 674, + 542, + 698 + ], + "spans": [ + { + "bbox": [ + 51, + 674, + 542, + 698 + ], + "type": "text", + "content": "StableCodec (Parker et al., 2024) highlights that the introduction of transformer-based architectures can lead to the length generalization problem. For instance, the training data of ALMTokenizer consists of 5-second segments, whereas the test" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 64, + 705, + 266, + 717 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 705, + 266, + 717 + ], + "spans": [ + { + "bbox": [ + 64, + 705, + 266, + 717 + ], + "type": "text", + "content": "6https://huggingface.co/ALM/wav2vec2-large-audioset" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 92, + 95, + 504, + 291 + ], + "blocks": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "lines": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 74, + 542, + 95 + ], + "type": "text", + "content": "Table 13. Objective metrics for the ALMTokenizer and baselines, evaluated on utterances from length 4s to 10s, showing generalization of models across lengths" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 92, + 95, + 504, + 291 + ], + "lines": [ + { + "bbox": [ + 92, + 95, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 92, + 95, + 504, + 291 + ], + "type": "table", + "html": "
ModelFPSTPSBitratePESQ (↑)UT-MOS (↑)VISQOL (↑)STOI (↑)DNSMOS (↑)
4 seconds
Encodec501501.5kbps1.972.643.620.803.26
DAC501501.5kbps2.13.173.650.813.26
Ours12.537.50.41kbps1.843.633.690.793.41
6 seconds
Encodec501501.5kbps1.972.543.630.813.26
DAC501501.5kbps2.03.113.650.813.28
Ours12.537.50.41kbps1.893.663.750.813.62
8 seconds
Encodec501501.5kbps1.962.523.630.813.34
DAC501501.5kbps2.13.183.660.813.28
Ours12.537.50.41kbps1.953.553.740.813.66
10 seconds
Encodec501501.5kbps1.952.533.650.813.32
DAC501501.5kbps2.12.193.670.813.25
Ours12.537.50.41kbps1.963.543.730.813.66
", + "image_path": "f019bb22a5fffe4804a7ec41e4db6b17b6e1f070f9aabb5852f94021dc8b83bd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 314, + 543, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 314, + 543, + 376 + ], + "spans": [ + { + "bbox": [ + 51, + 314, + 543, + 376 + ], + "type": "text", + "content": "data comprises segments of varying durations. We evaluate the model across four distinct length levels: 4, 6, 8, and 10 seconds. Encodec and DAC are selected as baselines due to their reliance on convolutional layers, which demonstrate robustness to variable input lengths. As shown in Table 13, the evaluation results indicate that ALMTokensizer effectively handles inference across these diverse lengths. These findings suggest that ALMTokensizer exhibits strong generalization capabilities with respect to input length variation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 387, + 282, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 387, + 282, + 399 + ], + "spans": [ + { + "bbox": [ + 52, + 387, + 282, + 399 + ], + "type": "text", + "content": "F.4. Compared to diffusion-based audio codec models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 406, + 544, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 406, + 544, + 574 + ], + "spans": [ + { + "bbox": [ + 51, + 406, + 544, + 574 + ], + "type": "text", + "content": "We compare ALMTokens with an alternative family of audio tokenizers that leverage discrete semantic tokens derived from self-supervised pre-trained (SSL) models (e.g., Hubert (Hsu et al., 2021), WavLM (Chen et al., 2022a), AudioMAE (Huang et al., 2022)). These models first quantize the SSL features into semantic tokens and subsequently use a generative model to resynthesize the waveform. Diffusion (Ho et al., 2020) and Flow-Matching (Lipman et al., 2022) are two popular generative models. Previous works, such as GLM4-Voice tokenizer (Zeng et al., 2024) and SemantiCodec (Liu et al., 2024), have demonstrated success using diffusion-based decoders. However, such strategies tend to result in significant information loss. For instance, the semantic tokens in GLM4-Voice lack timbre information and require additional prompts to control timbre during decoding. Notably, the open-sourced GLM4-Voice tokenizer uses a fixed timbre, meaning that any speech encoded by GLM4-Voice will lose its original timbre. To address this information loss in semantic tokens, SemantiCodec introduces acoustic streaming to enhance waveform reconstruction. A key concern, however, is that both SemantiCodec and GLM4-Voice tokenizers demand significantly more computational resources during the inference stage. In the following, we present a comprehensive comparison between ALMTokens and SemantiCodec, focusing on the following aspects: (1) reconstruction performance for speech, sound, and music; (2) semantic information performance for speech, sound, and music; and (3) computational resource requirements during inference, measured using RTF." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 578, + 543, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 578, + 543, + 640 + ], + "spans": [ + { + "bbox": [ + 51, + 578, + 543, + 640 + ], + "type": "text", + "content": "Table 14 shows the speech reconstruction and semantic performance, where we observe that ALMTokenizer outperforms the alternatives in both aspects while using less bitrate. Table 15 presents experimental results for sound and music data, where ALMTokenizer again demonstrates superior performance across all metrics compared to SemantiCodec. In Table 16, we present the model size and RTF metrics, showing that ALMTokenizer has fewer model parameters and significantly surpasses SemantiCodec in inference speed (0.031 vs 0.92)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 654, + 339, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 654, + 339, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 654, + 339, + 669 + ], + "type": "text", + "content": "G. The details of ALMTokenizer structure and training" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 674, + 145, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 674, + 145, + 686 + ], + "spans": [ + { + "bbox": [ + 52, + 674, + 145, + 686 + ], + "type": "text", + "content": "G.1. Model structure" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 693, + 542, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 542, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 542, + 718 + ], + "type": "text", + "content": "Table 17 gives the details of ALMTokensizer configuration, which results in 174M parameters. In all of experiments, for the MAE-transformer encoded and decoder, we adopt a 8 layer transformer layers." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 85, + 538, + 137 + ], + "blocks": [ + { + "bbox": [ + 110, + 74, + 485, + 85 + ], + "lines": [ + { + "bbox": [ + 110, + 74, + 485, + 85 + ], + "spans": [ + { + "bbox": [ + 110, + 74, + 485, + 85 + ], + "type": "text", + "content": "Table 14. The performance comparison between ALMTokensizer and SemanticCodec on VCTK dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 85, + 538, + 137 + ], + "lines": [ + { + "bbox": [ + 57, + 85, + 538, + 137 + ], + "spans": [ + { + "bbox": [ + 57, + 85, + 538, + 137 + ], + "type": "table", + "html": "
<tr><td>Models</td><td>FPS/TPS</td><td>CS/BR</td><td>Reconstruction</td><td></td><td></td><td></td><td></td><td>Semantic</td><td></td></tr>
<tr><td></td><td></td><td></td><td>UTMOS (↑)</td><td>DNS-MOS (↑)</td><td>VISQOL (↑)</td><td>STOI (↑)</td><td>PESQ (↑)</td><td>ASR (↓)</td><td>EC (↑)</td></tr>
<tr><td>SemantiCodec</td><td>50/50</td><td>16384/0.68kbps</td><td>3.2</td><td>3.57</td><td>3.90</td><td>0.81</td><td>1.76</td><td>48.3</td><td>17.8</td></tr>
<tr><td>ALMTokenizer</td><td>12.5/37.5</td><td>2048/0.41kbps</td><td>3.76</td><td>3.64</td><td>3.78</td><td>0.81</td><td>2.01</td><td>8.3</td><td>29.0</td></tr>
", + "image_path": "f59e452f380f40477af82cd8fc63a94ba7b3a987a6cdc597a6a915a3c081ab47.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 80, + 167, + 515, + 267 + ], + "blocks": [ + { + "bbox": [ + 52, + 154, + 542, + 166 + ], + "lines": [ + { + "bbox": [ + 52, + 154, + 542, + 166 + ], + "spans": [ + { + "bbox": [ + 52, + 154, + 542, + 166 + ], + "type": "text", + "content": "Table 15. The performance comparison between ALMTokensizer and SemanticCodec on Music (MusicCaps) and sound data (AudioCaps)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 80, + 167, + 515, + 267 + ], + "lines": [ + { + "bbox": [ + 80, + 167, + 515, + 267 + ], + "spans": [ + { + "bbox": [ + 80, + 167, + 515, + 267 + ], + "type": "table", + "html": "
<tr><td>Models</td><td>FPS/TPS</td><td>CS/BR</td><td>Reconstruction</td><td></td><td></td><td>Semantic</td></tr>
<tr><td></td><td></td><td></td><td>Mel loss (↓)</td><td>STFT loss (↓)</td><td>VISQOL (↑)</td><td>Classification (↑)</td></tr>
<tr><td>Sound data</td></tr>
<tr><td>SemantiCodec</td><td>50/50</td><td>16384/0.68kbps</td><td>18.45</td><td>1.40</td><td>2.47</td><td>38.8%</td></tr>
<tr><td>ALMTokenizer</td><td>12.5/37.5</td><td>2048/0.41kbps</td><td>15.0</td><td>1.24</td><td>2.99</td><td>44%</td></tr>
<tr><td>Music data</td></tr>
<tr><td>SemantiCodec</td><td>50/50</td><td>16384/0.68kbps</td><td>47.9</td><td>1.58</td><td>2.49</td><td>48%</td></tr>
<tr><td>ALMTokenizer</td><td>12.5/37.5</td><td>2048/0.41kbps</td><td>34.4</td><td>1.32</td><td>3.96</td><td>59%</td></tr>
", + "image_path": "9f4ec6d1eb51ddf04ea710cf886db9d3b82fb21f6a7d4b9c84f522637ec04adb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "spans": [ + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": "Patchify and UnPatchify modules A single-channel audio signal " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "\\pmb{x} \\in \\mathcal{R}^{1 \\times N}" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": " denotes the sampling points) is processed through the Encodec-style Patchify and UnPatchify modules, which adopt the same structure as Encodec (Défossez et al., 2022), consisting of four convolutional blocks. Each convolutional block consists of a residual unit followed by a down-sampling layer. These convolution blocks effectively encode the audio signal " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": " into an audio frame representation " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "e \\in \\mathcal{R}^{T \\times d}" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": " denotes the number of frames and " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": " denotes the dimension of each vector. The convolution blocks are followed by a two-layer LSTM for sequence modeling, followed by a final 1D convolutional layer with a kernel size of 7 and " + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 51, + 285, + 543, + 380 + ], + "type": "text", + "content": " output channels. The UnPatchify module mirrors the Patchify architecture by substituting stride convolutions with transposed convolutions and reversing the stride order." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "spans": [ + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "content": "For the StableCodec-style Patchify and UnPatchify modules, we follow the approach in StableCodec (Parker et al., 2024) and use a reshape operation to transform " + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x} \\in \\mathcal{R}^{t \\times sr}" + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "inline_equation", + "content": "e \\in \\mathcal{R}^{T \\times d}" + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "inline_equation", + "content": "T = N / 320" + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "inline_equation", + "content": "d = 320" + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "content": ". We then apply a linear layer to map the dimension to " + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 51, + 387, + 543, + 435 + ], + "type": "text", + "content": ". Finally, we add four transformer layers for sequence modeling. Similarly, the UnPatchify module mirrors the Patchify architecture." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 441, + 543, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 441, + 543, + 489 + ], + "spans": [ + { + "bbox": [ + 51, + 441, + 543, + 489 + ], + "type": "text", + "content": "Discriminators For the discriminators, we follow prior work (Défossez et al., 2022), which combines mel-spectrogram and log-mel-spectrogram features and inputs them into a network consisting of several convolutional layers. Specifically, we use six discriminators with different configurations: the hidden dimensions are set as 64, 128, 256, 512, 512, 512, and the hop lengths are set as 32, 64, 128, 256, 512, 1024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 502, + 329, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 502, + 329, + 513 + ], + "spans": [ + { + "bbox": [ + 52, + 502, + 329, + 513 + ], + "type": "text", + "content": "G.2. Reconstruction loss and adversarial loss for ALMTokenizer" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "spans": [ + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": "Let the reconstructed signal be " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": ". For the reconstruction loss, we design it from two perspectives: the time domain and the frequency domain. 
We first compute the " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": " loss between " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": " in the time domain. Next, we compute the " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": " loss between the STFT spectrogram of " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{x}}" + }, + { + "bbox": [ + 51, + 521, + 543, + 582 + ], + "type": "text", + "content": " in the frequency domain. Following (Wang et al., 2024b), we employ a sub-band split strategy to divide the spectrogram into several parts. The adversarial loss is employed to enhance the perceptual quality of the generated audio:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 180, + 586, + 542, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 586, + 542, + 619 + ], + "spans": [ + { + "bbox": [ + 180, + 586, + 542, + 619 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {d} = \\frac {1}{K} \\sum_ {i = 1} ^ {K} \\max (0, 1 - D _ {k} (\\boldsymbol {x})) + \\max (0, 1 + D _ {k} (\\hat {\\boldsymbol {x}})) \\tag {3}", + "image_path": "ffc97f5c916131474b63ace04ed89cb0ce6ca0f77448c37ae20c999aec41e707.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 625, + 543, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 625, + 543, + 649 + ], + "spans": [ + { + "bbox": [ + 51, + 625, + 543, + 649 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 51, + 625, + 543, + 649 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 51, + 625, + 543, + 649 + ], + "type": "text", + "content": " denotes the number of discriminators. 
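As a reading aid for Eq. (3), the snippet below is a minimal sketch of the discriminator hinge objective, assuming each discriminator D_k maps the waveform (or its spectrogram features) to a logit tensor; the function and variable names are illustrative assumptions and not taken from the released code. The generator-side counterpart in Eq. (4) follows in the text.

import torch

def discriminator_hinge_loss(discriminators, x_real, x_fake):
    # Eq. (3): (1/K) * sum_k [ max(0, 1 - D_k(x)) + max(0, 1 + D_k(x_hat)) ]
    per_disc = []
    for d in discriminators:
        real_term = torch.clamp(1.0 - d(x_real), min=0.0).mean()
        fake_term = torch.clamp(1.0 + d(x_fake), min=0.0).mean()
        per_disc.append(real_term + fake_term)
    return torch.stack(per_disc).mean()  # average over the K discriminators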
During the training stage, the adversarial loss for the generator is computed as a hinge loss over the logits of these discriminators:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 223, + 655, + 542, + 688 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 655, + 542, + 688 + ], + "spans": [ + { + "bbox": [ + 223, + 655, + 542, + 688 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {a d v} = \\frac {1}{K} \\sum_ {i = 1} ^ {K} \\max (0, 1 - D _ {k} (\\hat {\\boldsymbol {x}})) \\tag {4}", + "image_path": "35e03b6e4c5c45765bd47a9c8f63f9226b39547c700af3edf772945e2d8a0229.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "content": "The feature loss " + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{feat}" + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "content": " is computed by taking the average absolute difference between the discriminator's internal layer outputs for the generated audio and those for the corresponding real audio." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 467, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 189, + 85, + 405, + 127 + ], + "blocks": [ + { + "bbox": [ + 130, + 74, + 464, + 85 + ], + "lines": [ + { + "bbox": [ + 130, + 74, + 464, + 85 + ], + "spans": [ + { + "bbox": [ + 130, + 74, + 464, + 85 + ], + "type": "text", + "content": "Table 16. The model size and RTF comparison between SemantiCodec and ALMTokensizer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 189, + 85, + 405, + 127 + ], + "lines": [ + { + "bbox": [ + 189, + 85, + 405, + 127 + ], + "spans": [ + { + "bbox": [ + 189, + 85, + 405, + 127 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>Model size (M) (↓)</td><td>RTF (↓)</td></tr>
<tr><td>SemantiCodec</td><td>507</td><td>0.92</td></tr>
<tr><td>ALMTokenizer (Ours)</td><td>174</td><td>0.031</td></tr>
", + "image_path": "7eb3af90218801d9df7252db544ed8ebf2c58ddeb96adbbaf15ad379992732a6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 157, + 136, + 438, + 293 + ], + "blocks": [ + { + "bbox": [ + 157, + 136, + 438, + 293 + ], + "lines": [ + { + "bbox": [ + 157, + 136, + 438, + 293 + ], + "spans": [ + { + "bbox": [ + 157, + 136, + 438, + 293 + ], + "type": "table", + "html": "
<tr><td>ALMTokenizer</td><td></td></tr>
<tr><td>Input shape</td><td>(B, 1, N)</td></tr>
<tr><td>Patchify module (output)</td><td>(B, T, d), T=N/320</td></tr>
<tr><td>Token Interleaving and Retrieval</td><td>w ∈ [2, 3, 4, 5, 6, 7, 8, 9, 10]</td></tr>
<tr><td>Dimension of transformer encoder</td><td>256</td></tr>
<tr><td>The number of transformer encoder</td><td>24</td></tr>
<tr><td>Dimension of transformer decoder</td><td>512</td></tr>
<tr><td>The number of transformer decoder</td><td>24</td></tr>
<tr><td>Codebook size</td><td>2048</td></tr>
<tr><td>VQ layers</td><td>3</td></tr>
<tr><td>Number of Transformer heads</td><td>64</td></tr>
<tr><td>UnPatchify module (output)</td><td>(B, 1, N)</td></tr>
", + "image_path": "6d44cde2f64ff7b03fe3f9a37cca4f2bb251ac87eae5b10bc5cf0924c1174a82.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 192, + 306, + 402, + 318 + ], + "lines": [ + { + "bbox": [ + 192, + 306, + 402, + 318 + ], + "spans": [ + { + "bbox": [ + 192, + 306, + 402, + 318 + ], + "type": "text", + "content": "Table 17. ALMTokenizer model backbone configurations" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 335, + 144, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 335, + 144, + 347 + ], + "spans": [ + { + "bbox": [ + 52, + 335, + 144, + 347 + ], + "type": "text", + "content": "G.3. Training details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "spans": [ + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "text", + "content": "The AdamW optimizer is used in the training. We set the learn rate as " + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "text", + "content": ". We train the model with 200k steps. The final loss as following shows. We set " + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "inline_equation", + "content": "\\lambda_{1} = 0.5" + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "inline_equation", + "content": "\\lambda_{2} = 0.1" + }, + { + "bbox": [ + 51, + 354, + 544, + 390 + ], + "type": "text", + "content": " during our experiments. We conduct all of the experiments with 4 NVIDIA A100-80G GPUs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 197, + 398, + 542, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 398, + 542, + 412 + ], + "spans": [ + { + "bbox": [ + 197, + 398, + 542, + 412 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathbf {L} _ {\\text {a d v}} + \\mathbf {L} _ {\\text {f e a t}} + \\mathbf {L} _ {\\text {r e c}} + \\lambda_ {1} \\mathbf {L} _ {\\text {M A E}} + \\lambda_ {2} \\mathbf {L} _ {\\text {A R}} \\tag {5}", + "image_path": "12cb13002e9cb2cd2e016f4eedbe2301e9a8b67a8aa802b90f2eea6edb26fbcc.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 426, + 206, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 426, + 206, + 440 + ], + "spans": [ + { + "bbox": [ + 52, + 426, + 206, + 440 + ], + "type": "text", + "content": "H. Reproducibility Statement" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 446, + 544, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 446, + 544, + 483 + ], + "spans": [ + { + "bbox": [ + 51, + 446, + 544, + 483 + ], + "type": "text", + "content": "To enhance reproducibility, we provide the pseudocode of ALMTokensizer. In the future, we plan to improve both the model structure and training data to obtain more robust models, especially for music and sound, and release the code for the research community." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 53, + 509, + 542, + 719 + ], + "blocks": [ + { + "bbox": [ + 221, + 497, + 373, + 508 + ], + "lines": [ + { + "bbox": [ + 221, + 497, + 373, + 508 + ], + "spans": [ + { + "bbox": [ + 221, + 497, + 373, + 508 + ], + "type": "text", + "content": "Listing 1. Pseudocode of ALMTokenizer" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 53, + 509, + 542, + 719 + ], + "lines": [ + { + "bbox": [ + 53, + 509, + 542, + 719 + ], + "spans": [ + { + "bbox": [ + 53, + 509, + 542, + 719 + ], + "type": "text", + "content": "class ALMTokensizer: def __init__(self, transformerEncoder_args, transformerDecoder_args, maeDecoder_args, depth_gpt_args, patchify_args, encoder_embedding_dim, decoder_embedding_dim, semantic_prior_path, mask_rate, window_sizes = [2,3,4,5,6,7,8,9,10],): self(window_sizes = window_sizes self.transformerEncoder = Transformer(transformerEncoder_args) self.transformerDecoder = Transformer(transformerDecoder_args) self.maedecoder = Transformer(maedecoder_args) self.Patchify = EncodeEncoder(patchify_args) self.UnPatchify = EncodeDecoder(patchify_args)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 468, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 468, + 57 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 468, + 57 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 741 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 741 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 76, + 68, + 541, + 717 + ], + "blocks": [ + { + "bbox": [ + 76, + 68, + 541, + 717 + ], + "lines": [ + { + "bbox": [ + 76, + 68, + 541, + 717 + ], + "spans": [ + { + "bbox": [ + 76, + 68, + 541, + 717 + ], + "type": "text", + "content": "self.cls_token = nnParameter(torch.zeros(1, 1, encoder_embedding_dim))\nselfmasked_token = nnParameter(torch.zeros(1, 1, decoder_embedding_dim))\ncheckpoint = torch.load(semantic_prior_path, map_location=\"cpu\")\nself.vq = RVQ_semantic(\n input_dim=encoder_embedding_dim,\n semantic_prior = checkpoint,\n layers = 3)\nself.depth_gpt = GPT Decoder(depth_gpt_args)\nself.temp_window_size = 6\nself_mask_rate = mask_rate\ndef Encoder_token_Interleaving(self, x):\n B, T, D = x.shape # batch, length, dim\ncls_tokens = self.cls_tokenrepeat(B, (T//self.tmp_window_size), 1).unsqueeze(2)\n new_T = T + (T // self.tmp_window_size)\nx reshaped = x reshape(B, T // self.tmp_window_size, self.tmp_window_size, D)\nx_withCls = torch.cat([x reshaped, cls_tokens], dim=2)\nnew_x = x_withCls.reshape(B, -1, D)\nreturn new_x\ndef Encoder_token_Retrieval(self, x):\n B, new_T, D = x.shape\noriginal_T = new_T - new_T // (self.tmp_window_size + 1)\nmask Indices = [(i + 1) * (self.tmp_window_size + 1) - 1 for i in range(original_T // self.tmp_window_size)]\ncls_tokens = new_x[;, mask Indices, :]\nreturnCLS_tokens\ndef Decoder_token_Interleaving(self, en_token):\n B, T, D = en_token.shape\nx = self-mask_tokenrepeat(B, 1, 
1)\nnew_T = en_token.shape[1] * self.tmp_window_size + en_token.shape[1]\nx = x.repeata(1, en_token.shape[1] * self.tmp_window_size, 1)\nx = x.reshape(B, -1, self.tmp_window_size, D)\nx_with Masks = torch.cat([x, en_token.unsqueeze(2)], dim=2)\nnew_x = x_with Masksreshape(B, -1, D)\nreturn new_x\ndef Decoder_token_Retrieval(self, new_x):\n B, new_T, D = new_x.shape\nnum_masks = new_T // (self.interval + 1)\noriginal_T = new_T - num_masks\nmaskIndices = [(i + 1) * (self.interval + 1) - 1 for i in range(num_masks)]\nallIndices = list(range(new_T))\nmaskIndices = [i for i in allIndices if i not in maskIndices]\nmask Frames = new_x[;, maskIndices,:]\nreturn mask Frames\ndef forward(self, x):\n x_len = x.shape[-1]\nself.temp_window_size = choice(selfwindow_sizes)\nemb Frames = self.Patchify(x)\nif self.trainin:\n emb Frames_mask = self.apply_mask(emb Frames, mask_rate = self-mask_rate)\ninterleaving Frames = self.Encoder_token_Interleaving(emb Frames_mask)\npredictDSP = self.maedecoder(interleavingFrames)\nmae_loss = L1_loss(predictDSP, emb Frames)\nlatent_tokens = self.transformer Encoder(interleavingFrames)\nquery_token = self.Encoder_token_Retrieval(latent_tokens)\nQuantized_token, codes, allquantized = self.vq(query_token)\ncat_quantized = []\nfor q_emb in all_quantized:" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 468, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 95, + 68, + 484, + 159 + ], + "blocks": [ + { + "bbox": [ + 95, + 68, + 484, + 159 + ], + "lines": [ + { + "bbox": [ + 95, + 68, + 484, + 159 + ], + "spans": [ + { + "bbox": [ + 95, + 68, + 484, + 159 + ], + "type": "text", + "content": "q_emb = q_emb.reshape(-1, q_emb.shape[-1]).unsqueeze(1) \ncat_quantized.append(q_emb) \ncat_quantized = torch.cat(cat_quantized, dim=1) \ngpt_loss = self.depth_gpt.compute_prior_loss(cat_quantized) \nde_interleaving Frames = self.Decoder_token_Interleaving(Quantized_token) \ndelatent_token = self.transformer Decoder(de_interleaving Frames) \nmask_tokens = self.Decoder_token_Retestval(de_forensic_token) \nx_ = self.UnPatchify mask_tokens) \nreturn x_, mae_loss, gpt_loss" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 52, + 180, + 121, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 180, + 121, + 192 + ], + "spans": [ + { + "bbox": [ + 52, + 180, + 121, + 192 + ], + "type": "text", + "content": "I. 
Limitation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 201, + 544, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 201, + 544, + 333 + ], + "spans": [ + { + "bbox": [ + 52, + 201, + 544, + 333 + ], + "type": "text", + "content": "In this study, we present ALMTokenizer, a low-bitrate, semantic-rich audio codec tokenizer. We demonstrate that ALM-Tokenizer excels in both reconstruction and semantic information retention under low-bitrate conditions. However, we acknowledge that there is still significant room for improvement in reconstruction performance, particularly for sound and music data. Building an audio tokenizer for sound and music in the low-bitrate setting poses additional challenges. In terms of semantic information, ALMTokenizer still lags behind traditional SSL models. Although we propose several training losses to enhance semantic information in the codec model, the improvements are limited and, in some cases, negatively impact reconstruction quality. We recognize the need for a careful design and balance of these semantic loss terms. Additionally, the multi-stage training strategy increases training complexity. These training strategy brings waste. Most of the components are eventually discarded, e.g. MAE-transformer encoder/decoder, MAE-decoder, and depth AR-transformer. These components would have made sense to still utilize them for some purpose, e.g. the AR decoder could have been used to initialize the depth transformer in the Language modeling task. These concerns are left for future work." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "spans": [ + { + "bbox": [ + 127, + 45, + 466, + 56 + ], + "type": "text", + "content": "A Low-bitrate and Semantic-rich Audio Codec Tokenizer for Audio Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_content_list.json b/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..87c83072b772d28998cd66788e88d33a68d9d5c1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_content_list.json @@ -0,0 +1,1553 @@ +[ + { + "type": "text", + "text": "FingER: Content Aware Fine-grained Evaluation with Reasoning for AI-Generated Videos", + "text_level": 1, + "bbox": [ + 86, + 101, + 911, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rui Chen", + "bbox": [ + 192, + 157, + 269, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "chenrui.chen@alibaba-inc.com", + "bbox": [ + 125, + 174, + 334, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AMAP, Alibaba Group", + "bbox": [ + 153, + 189, + 307, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 181, + 205, + 279, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"Lei Sun", + "bbox": [ + 467, + 157, + 531, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ally.sl@alibaba-inc.com", + "bbox": [ + 418, + 174, + 580, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AMAP, Alibaba Group", + "bbox": [ + 421, + 189, + 575, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 450, + 205, + 547, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jing Tang", + "bbox": [ + 725, + 157, + 807, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "guangyu.tj@alibaba-inc.com", + "bbox": [ + 669, + 175, + 864, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AMAP, Alibaba Group", + "bbox": [ + 691, + 189, + 843, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 718, + 205, + 816, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Geng Li", + "bbox": [ + 331, + 231, + 397, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xiaofeng/lg@alibaba-inc.com", + "bbox": [ + 266, + 247, + 462, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AMAP, Alibaba Group", + "bbox": [ + 287, + 263, + 439, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 315, + 279, + 411, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiangxiang Chu", + "bbox": [ + 565, + 231, + 699, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "chuxiangxiang.cxx@alibaba-inc.com", + "bbox": [ + 509, + 247, + 756, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AMAP, Alibaba Group", + "bbox": [ + 555, + 263, + 707, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 584, + 279, + 681, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 301, + 156, + 316 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in video generation have posed great challenges in the assessment of AI-generated content, particularly with the emergence of increasingly sophisticated models. The various inconsistencies and defects observed in such videos are inherently complex, making overall scoring notoriously difficult. In this paper, we emphasize the critical importance of integrating fine-grained reasoning into video evaluation, and we propose FingER, a novel entity-level reasoning evaluation framework that first automatically generates Fine-grained Entity-level questions, and then answers those questions by a Reasoning model with scores, which can be subsequently weighted summed to an overall score for different applications. Specifically, we leverage LLMs to derive entity-level questions across five distinct perspectives, which (i) often focus on some specific entities of the content, thereby making answering or scoring much easier by MLLMs, and (ii) are more interpretable. Then we construct a FingER dataset, consisting of approximately 3.3k videos and corresponding 60k fine-grained QA annotations, each with detailed reasons. Based on that, we further investigate various training protocols to best incentivize the reasoning capability of MLLMs for correct answer prediction. Extensive experiments demonstrate that a reasoning model trained using Group Relative Policy Optimization (GRPO) with a cold-start strategy achieves the best performance. 
Notably, our model surpasses existing methods by a relative margin of $11.8\\%$ on GenAI-Bench and $5.5\\%$ on Monet-Bench with only 3.3k training videos, which is at most one-tenth of the training samples utilized by other methods. Our code and dataset will be released soon.", + "bbox": [ + 81, + 320, + 483, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 83, + 710, + 218, + 724 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in Text-to-Video (T2V) generative models [2, 4, 45] have demonstrated significant progress in producing visually appealing and content-rich videos. For instance, post-Sora models such as Kling have shown the ability to generate high-resolution videos that closely adhere to textual prompts. However, these models often produce localized artifacts, inconsistencies, and violations of physical laws. These issues highlight the necessity for the development of robust and reliable quality assessment methods for AI-generated video content.", + "bbox": [ + 81, + 729, + 482, + 853 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Early research on evaluating AI-generated videos has primarily relied on feature-based metrics, such as the Frechet Video Distance (FVD) [30] and optical flow-based methods like RAFT [26]. While", + "bbox": [ + 81, + 854, + 482, + 896 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7d6921f0086a2ed04a58acfd7dec5e0ba42ca4b1e6fb8d80da3e0245f2a73809.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 511, + 301, + 913, + 412 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9e3526d378290d0e5f1f5b7280de9f4ac5ef4c8a53b37682de9d2eef40d2c009.jpg", + "image_caption": [ + "(b)", + "(c)", + "(d)", + "Figure 1: Advanced generation models often exhibit localized defects while maintaining overall visually appealing, as illustrated in (a), which requires fine-grained in-depth understanding. (b) and (c) show that even with detailed instructional prompts and entity-level questions, GPT-4o still fails to identify this hand deformation. (d) shows the effectiveness of our work by integrating reasoning model with fine-grained entity-level questions." + ], + "image_footnote": [], + "bbox": [ + 516, + 426, + 913, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "these methods effectively assess overall visual quality and dynamic characteristics, they fall short in capturing nuanced aspects that require deeper semantic understanding and fine-grained reasoning. To address these limitations, recent studies have introduced MLLMs for more comprehensive evaluations. For example, VideoScore [11] proposes a framework that evaluates five distinct aspects of video quality using an MLLM to assign scores ranging from 1 to 4. VisionReward [37] aligns video generation with human perception by formulating predefined judgment questions and fine-tuning a video-based MLLM to compute weighted scores. Similarly, LiFT [33]", + "bbox": [ + 511, + 756, + 913, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10358v1 [cs.CV] 14 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "learns a reward model that provides reasons and scores across multiple aspects to align the generation model with human preferences. 
Despite these advancements, two key challenges persist:", + "bbox": [ + 83, + 107, + 480, + 147 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(i) Inadequacy of Fine-grained Video Reasoning: Although advanced generative models have significantly improved global visual quality by reducing issues such as blurriness and flickering, they still exhibit localized spatiotemporal inconsistencies, distortions, unnatural artifacts, and violations of physical laws, especially in scenarios involving complex motion or multiple entities. For instance, Fig 1(a) shows a video generated by Pixverse that, despite its high overall visual appeal, contains a noticeably deformed hand in a localized area. This example underscores the need for more fine-grained and context-aware reasoning capabilities in video understanding, moving beyond superficial visual pattern recognition to incorporate temporally grounded and semantically rich analysis. (ii) Domain Gap in AI-Generated Videos: Current state-of-the-art MLLMs struggle to capture the intrinsic characteristics of AI-generated videos, even with well-defined prompts. As illustrated in Fig 1(b) and (c), GPT-4o misidentifies the deformed hand in a video and assigns a high score based on misleading explanations. This issue is primarily attributed to a domain gap between the training data used by MLLMs and the unique features of AI-generated videos. In essence, AI-generated videos can deceive MLLMs in certain latent feature spaces. Bridging this gap requires a high-quality dataset of AI-generated videos. Moreover, developing strategies to enhance the generalization of MLLMs to AI-generated videos remains an open challenge.", + "bbox": [ + 81, + 148, + 482, + 479 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by the Question Generation and Answering (QG/A) framework [6] and recent reasoning works [7, 21, 22, 46] that demonstrate a significant self-emergence of complex cognitive reasoning abilities induced by Deepseek R1 [10], we argue that incorporating fine-grained reasoning abilities would significantly enhance the video quality assessment. In this paper, we propose FingER, a novel framework that first decomposes the overall evaluation into fine-grained entity-level questions and then answers these questions with corresponding scores by a reasoning model, which is fine-tuned on our high-quality dataset using GRPO with a cold-start initialization. Specifically, we employ five distinct aspects as defined in VideoScore [11], including text-to-video alignment, temporal consistency, factual consistency, dynamic degree, and visual quality. By deriving such fine-grained entity-level questions, our framework not only enables the model to explicitly focus on specific characteristics of certain entities, thereby facilitating a more fine-grained understanding, but also enhances interpretability through these structured QA pairs.", + "bbox": [ + 81, + 479, + 482, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on these questions, we prompted several strong MLLMs [13, 25] to provide answers. However, we observed that these models struggle to provide correct answers, particularly in aspects like factual consistency. As stated before, we attribute this to the lack of high-quality AI-generated video datasets and the inadequate reasoning capabilities of current models. 
Therefore, we curated a fine-grained AI-generated video reasoning dataset, FingER-Instruct-60k, which consists of $3.3\\mathrm{k}$ AI-generated videos sourced from advanced generation models like Kling, Luma, Vidu, PixVerse, CogVideoX [38], etc. For each video, we generate fine-grained questions and annotate them with 'Yes/No'. To ease human labor and", + "bbox": [ + 81, + 729, + 482, + 881 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "also reduce potential errors, we leverage MLLMs to generate detailed reasoning explanations given each question and its answer. (Note that, while MLLMs often struggle to answer these questions correctly, they demonstrate higher possibilities of producing coherent reasoning when the answer is explicitly provided, suggesting the presence of underlying reasoning capabilities.) These generated reasons were subsequently re-checked and refined by human annotators to ensure accuracy and quality. At last, we collect 60k fine-grained QA annotations with high-quality detailed reasons.", + "bbox": [ + 511, + 107, + 913, + 231 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To enhance the video reasoning capabilities, we choose Qwen2.5-VL [1], and explore multiple training protocols on our dataset, including directly training with answers, training with reasons, zero GRPO training and GRPO training with a cold-start initialization. Our experiments reveal that integrating high-quality reasons can largely increase the performance along with the interpretability, and GRPO with cold-start can further enhance its performance, especially in dimensions that require in-depth understanding. We also test our reasoning model in a zero-shot manner on public benchmarks, and still consistently achieve state-of-the-art performance.", + "bbox": [ + 511, + 232, + 913, + 369 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we propose an entity-level quality assessment framework with strong reasoning and generalization capabilities. To the best of our knowledge, our work is the first to introduce entity-level reasoning into the quality assessment of AI-generated videos.", + "bbox": [ + 511, + 369, + 913, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions can be summarized as follows:", + "bbox": [ + 529, + 426, + 830, + 438 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Novel Evaluation Approach. We propose a novel evaluation approach FingER, designed for practical AI-generated video quality assessment. It comprises an entity-level question generation module and a video reasoning model that provides corresponding scores. By emphasizing fine-grained reasoning, our approach effectively addresses localized defects in AI-generated videos that require in-depth understanding and significantly enhances interpretability.", + "- Fine-grained Reasoning Dataset. We present a new dataset for AI-generated video reasoning, containing 3.3k videos and 60k entity-level QA annotations sourced from advanced generation models. Each QA pair is annotated with detailed reasons. This dataset aims to further advance research in this field.", + "- Enhanced Training Protocols. We explore multiple training protocols to enhance the fine-grained video reasoning capabilities of MLLMs. 
Notably, we are the first to introduce GRPO training into AI-generated video quality assessment, which proves to be highly effective in improving both reasoning and generalization abilities", + "- **Strong Performance.** Extensive experiments demonstrate the effectiveness of our approach. We achieve state-of-the-art performance on public benchmarks using only one-tenth of the training videos, thereby highlighting the superior generalization capability of our model." + ], + "bbox": [ + 540, + 443, + 913, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 514, + 801, + 658, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Video Quality Assessment", + "text_level": 1, + "bbox": [ + 514, + 821, + 772, + 837 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Early approaches relied on feature-based metrics, such as Fréchet Video Distance (FVD) [30], Inception Score (IS) [28], and CLIPSim [29]. And benchmark works like EvalCrafter [40] and VBench [47] introduced comprehensive evaluation frameworks with 18 and 16", + "bbox": [ + 513, + 840, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7e79c5f5413e855405378af10caaf9c9af8f05fc58ac361b7acede64c70a4a2e.jpg", + "image_caption": [ + "Figure 2: The overview of our proposed FingER framework, including (a) the evaluation pipeline, (b) FingER-Instruct-60k dataset curation, and (c) GRPO training of our reasoning model." + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 459, + 460 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/af4c5252e3a743d7a1ba6d4c688efce7265a4c7b40a1e0158ee15f39758821a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 103, + 913, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "metrics, respectively. However, these methods fall short in assessing deep semantic understanding or aligning with human perception.", + "bbox": [ + 81, + 522, + 482, + 550 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With the rapid advancement of MLLMs [1, 5, 8, 25], increasing studies have explored to leverage their capabilities to facilitate image/video quality evaluation [6, 12, 18, 19, 36]. Inspired by DSG [6], which uses question generation/answering (QG/A) for interpretable assessment, T2VScore [14] adopted a QA framework for T2V alignment. T2VQA [27] introduced the T2VQA-DB dataset, comprising 10k videos annotated with Mean Opinion Scores (MOS), and trained a transformer-based model to predict these scores. Similarly, VideoScore [11] proposed a larger dataset across five dimensions and employed a MLLM for scoring. VMBench [20] introduced perception-aligned motion metrics to evaluate motion quality. While these methods predict scores or labels, they often overlook the reasoning behind assessments, limiting their effectiveness. Our work distinguishes itself by incorporating entity-level reasoning for evaluating advanced generation models reliably.", + "bbox": [ + 81, + 550, + 482, + 758 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Another line of research focuses on reward models for improving generative models via Reinforcement Learning from Human Feedback (RLHF), such as Diffusion-DPO [32], VisionReward [37] and UnifiedReward [34]. 
While these efforts target generative model optimization, our work emphasizes practical video quality evaluation, we expect it is able to further benefit the generation models using RLHF in future work.", + "bbox": [ + 81, + 758, + 482, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Reasoning Inference in Large Models", + "text_level": 1, + "bbox": [ + 513, + 521, + 861, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reasoning inference aims to emulate human-like thinking processes by forming the final answer through a Large Language Model (LLM). Specifically, to answer a given question, an LLM is required to think divergently and record the thinking processes, which are subsequently referenced when formulating the final answer. This approach has inspired a variety of research, including prompting-based Chain-of-Thought (CoT) [35], planning-based Graph-of-Thought [3] and Tree-of-Thought [39] processing, reward methods [16], and supervised fine-tuning (SFT) datasets with sufficient context [41]. Notably, DeepSeek-R1 [10] integrates specific prompts with reinforcement learning (RL), enabling the model to first generate the thinking process before producing the final answer. This method allows for supervised fine-tuning with a small amount of annotated data containing thinking processes, followed by reinforcement learning fine-tuning on more data without thinking processes. A very recent approach [7] proposes a highly simplified reinforcement learning framework and demonstrates its validity across several benchmarks.", + "bbox": [ + 511, + 539, + 915, + 789 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 514, + 806, + 609, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we first introduce our entity-level video quality assessment framework - FingER in Sec. 3.1. Then, we detail the data curation pipeline of our proposed dataset, namely FingER-Instruct-60k in Sec. 3.2. In the end, we combine multiple training methods with our proposed instruction tuning dataset, from the", + "bbox": [ + 511, + 825, + 915, + 897 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "basic supervised fine-tuning (SFT), to reasoning training with reinforcement learning (RL), as detailed in Sec. 3.3.", + "bbox": [ + 81, + 106, + 485, + 135 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Entity-level VQA Framework", + "text_level": 1, + "bbox": [ + 83, + 184, + 364, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For Text-to-Video (T2V) generation task, user input prompt is the only key instruction for generative models to understand and generate content that well-aligned with user's intent. To perform entity-level quality assessment of AI-generated videos, we start from understanding the user's prompt through extracting entities, attributes, and actions within itself. Inspired by DSG [6] in Text-to-Image (T2I) evaluation, we also utilize closed-source Large-Language-Model (LLM) to perform textual understanding and the following entity extraction. As shown in Fig. 
4, we provide abundant in-context learning (ICL) [35] examples from different video generation scenarios and formulate the final input for GPT-4o [13], in which way we can harvest more steady entity extraction results.", + "bbox": [ + 81, + 203, + 482, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With entities extracted from the user's prompt, we generate entity-level questions from five distinct video quality assessment dimensions, including visual quality, text-to-video alignment, temporal consistency, factual consistency, and dynamic degree. For each dimension, we provide a detailed explanation followed by several key points, formulating the context information when prompting the LLM. We also prepare adequate entity-level in-context learning examples, which are summarized from videos with and without obvious artifacts or hallucinations. In this way, we can help the LLMs to better understand which question should be asked when coping with a specific entity along with the given assessment dimension. In short, we break down the granularity of fine-grained video quality assessment from multi-dimensional level to entity-level. And the intuition behind entity-level question generation is that we hope fine-grained question/answering can guide the MLLM to focus on understanding the correlation between entity-level textual description and its corresponding visual appearance based on the video content.", + "bbox": [ + 81, + 369, + 482, + 618 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After the entity-level question generation procedure, our fin-tuned MLLM answers the above questions with a simple \"Yes\" or \"No\", along with a detailed reasoning process explaining why the answer is that. Learning the logical reasoning process is critical for model performance improvements, as detailed in the experiment Sec. 4.4. The outputted reason can also be useful when conducting practical video quality assessment, which is more interpretable and user-friendly. To formulate a final score representing the overall quality of AI-generated videos, we start by calculating the probability of the answer token (\"Yes\" or \"No\") for each entity-level question to represent the entity-level score. Since there are multiple \"Yes\" and \"No\" with different formats but similar meanings in the vocabulary of our MLLM, we first gather the token set for \"Yes\" and \"No\". In this paper, we take [\"Yes\", \"yes\", \"YES\", \"Yes\", \"Yes\"] as the token set for answer \"Yes\", and [\"No\", \"no\", \"NO\", \"No\", \"No\"] for answer \"No\", denoted by $T_{Y}$ and $T_{N}$ , respectively. With logits from the answer token, we extract all the logit whose token id is within the token set, and apply softmax over $T_{Y} \\cup T_{N}$ , as illustrated in Eq. 1. 
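A minimal sketch of the restricted softmax just described, which Eq. (1) below formalizes: only the logits at the listed Yes/No token ids are normalized, then summed per answer class. The function name, the PyTorch usage, and the yes_ids/no_ids placeholders are assumptions for illustration, not the released FingER code.

import torch

def yes_no_probabilities(answer_logits, yes_ids, no_ids):
    # Keep only logits of tokens in T_Y ∪ T_N, softmax over that union (Eq. 1),
    # then sum the probabilities belonging to each answer class.
    ids = torch.tensor(list(yes_ids) + list(no_ids), dtype=torch.long)
    probs = torch.softmax(answer_logits[ids], dim=-1)
    p_yes = probs[:len(yes_ids)].sum()
    p_no = probs[len(yes_ids):].sum()
    return p_yes, p_no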
Then, given the entity-level question $q$ , we can get the answer's probability $P(No \\mid q)$ and $P(Yes \\mid q)$ with a simple sum", + "bbox": [ + 81, + 619, + 482, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "up.", + "bbox": [ + 514, + 109, + 539, + 121 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP (N o \\mid q) = \\sum_ {\\substack {i = 1 \\\\ m}} ^ {n} \\text {S o f t m a x} (x _ {i}), x _ {i} \\in T _ {N}; \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 119, + 911, + 162 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP (Y e s \\mid q) = \\sum_ {j = 1} ^ {m} S o f t m a x (y _ {j}), y _ {j} \\in T _ {Y}.\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 161, + 836, + 195 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Instead of directly using the derived probability as the entity-level score, we still need the judgment on whether the question is positive or negative. For example, given the question \"Do the attributes of the table in the video (such as size, shape, and material) align with real-world characteristics?\" from the factual consistency dimension, it is apparent that the factual consistency of the assessed video goes up with a positive \"Yes\" answer. We define this type of question as a positive one, and vice versa. We denote the status of an entity-level question with $q_{stat}$ , if $q_{stat}$ equals 1, it means that the question is positive; otherwise, the question is negative.", + "bbox": [ + 511, + 196, + 915, + 335 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {e n t i t y}} = \\left\\{ \\begin{array}{l l} P (N o \\mid q), & \\text {i f} q _ {\\text {s t a t}} = 0; \\\\ P (Y e s \\mid q), & \\text {i f} q _ {\\text {s t a t}} = 1. \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 339, + 911, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With the aforementioned preparations setup, we propose our entity-level score $S_{entity}$ , which correlates positively with the quality of the assessed video. When the entity-level question is positive, we use the probability of the \"Yes\" answer $P(Yes \\mid q)$ to represent the score it can gain. And we utilize the probability of the \"No\" answer $P(No \\mid q)$ if the question is negative, as illustrated in Eq. 2. In short, our intuition behind this design is that as long as the video quality goes up with which answer, we calculate our entity-level score based on that answer's probability. Then, we utilize entity-level question/answering pairs that are under the same quality assessment dimension to formulate our dimension-level score $S_{dim}$ . To be specific, we simply calculate the linear summation of multiple answers' probability $S_{entity}$ , as illustrated in Eq. 3.", + "bbox": [ + 511, + 376, + 915, + 556 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {d i m} = \\sum_ {i = 1} ^ {N} S _ {e n t i t y} i \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 653, + 561, + 911, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the end, we derive the overall-level score $S_{\\text{overall}}$ with the weighted average of five distinct dimension scores $S_{dim}$ in Eq. 4.", + "bbox": [ + 513, + 604, + 911, + 633 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {o v e r a l l}} = \\sum_ {i = 1} ^ {5} w _ {i}. 
S _ {\\text {d i m}} i \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 637, + 911, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In short, we propose the entity-level VQA framework FingER, which consists of three parts: (i) entity-level question generation, (ii) the fine-tuned MLLM with reasoning output, and (iii) the hierarchical scoring function that converts token probability to multi-level scores.", + "bbox": [ + 511, + 676, + 915, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Entity-level Dataset with Reasoning", + "text_level": 1, + "bbox": [ + 513, + 758, + 851, + 775 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we introduce the construction pipeline of our entity-level instruction tuning dataset, named FingER-Instruct-60k.", + "bbox": [ + 511, + 777, + 915, + 805 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Prompt and T2V Model Selection. Based on VideoGenEval [42] dataset, our instruction tuning dataset is composed of 420 diverse text prompts and 3.3k AI-generated videos produced by 8 modern T2V models, including closed-source models: Kling, Luma, PixVerse, Vidu, Qingying, and open-sourced models: Mochi-1 [24], CogVideoX [38], Open-Sora [45]. We utilize all 420 text", + "bbox": [ + 511, + 811, + 915, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "prompts from the T2V session [45], which cover a diverse range of complex scenarios, including human-centric activities, material and spatial relationships, as well as animal and text generations. These prompts are derived from real-life user inputs. As for the T2V model selection, we denote models that understand and obey most of the common sense and physical laws, and generate time-consistent videos without obvious temporal distortions as the high-quality model. We select the generative models uniformly based solely on the quality of their generated videos, spanning from high-quality models to average-quality models, for a more diverse training data distribution.", + "bbox": [ + 81, + 106, + 483, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.2 Entity-level Question Generation and Annotation. Our multi-dimensional entity-level question generation starts with understanding users' input prompts and extracting the entities within. We use GPT-40 [13] for prompt understanding and entity extraction, with abundant in-context learning examples provided. Then, we perform the entity-level question generation for our five distinct assessment dimensions. For each entity, we prompt the LLM with task introduction, assessment dimension explanation with several key points to focus on, user's input prompt, the extracted entity, and the most important in-context learning examples. And we extract the generated questions with regular expression matching.", + "bbox": [ + 81, + 273, + 483, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For data annotation, we engaged 10 professional annotators to complete the task of annotating $60\\mathrm{k}$ question/answer pairs. Inter-annotator agreement was ensured through multiple rounds of small-scale pilot annotations, and the entire process took approximately one month to complete.", + "bbox": [ + 81, + 426, + 483, + 494 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.3 Reasoning Generation and Verification. We employ the powerful closed-source MLLM [25] to generate the initial version of the reasoning process. 
+ { + "type": "text", + "text": "3.2.3 Reasoning Generation and Verification. We employ the powerful closed-source MLLM [25] to generate the initial version of the reasoning process. Specifically, we prompt the MLLM with the assessment dimension explanation, the user prompt, in-context learning examples, and the entity-level question along with its human-annotated result. An interesting finding is that when the MLLM is provided with the correct answer to the entity-level question, the generated reasoning process for explaining the answer is more reasonable than when directly generating the answer and its reason. Rather than using the MLLM-generated reasoning process directly, we conduct thorough human verification to ensure the quality of our reasoning training data.", + "bbox": [ + 81, + 510, + 482, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the aforementioned entity-level questions, human-annotated answers, and detailed reasoning processes, we formulate our instruction tuning dataset FingER-Instruct-60k, which serves as our basis for the model training in the next section.", + "bbox": [ + 81, + 676, + 483, + 732 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Instruction Tuning and GRPO Training", + "text_level": 1, + "bbox": [ + 83, + 751, + 447, + 767 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use Qwen2.5-VL-7B-Instruct [1] as our base model and apply supervised fine-tuning (SFT), SFT with reasoning, and reinforcement learning on it.", + "bbox": [ + 81, + 768, + 483, + 811 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.1 Supervised Fine-Tuning. We directly train the base model on FingER-Instruct-60k; the model's response contains only a \"Yes\" or \"No\" answer following the next-token-prediction paradigm. This means the model only needs to learn to predict the correct answer without any reasoning process. The loss function is the Cross-Entropy", + "bbox": [ + 81, + 825, + 483, + 896 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loss:", + "bbox": [ + 514, + 107, + 550, + 119 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{CE} = - \\sum_{i = 1}^{N} y_{i} \\log \\left(p_{i}\\right) \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 118, + 913, + 156 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.2 Supervised Fine-Tuning with Reasoning. We also train the base model on FingER-Instruct-60k, but the difference compared to Supervised Fine-Tuning is that the model needs to learn to predict the correct answer within the $<$answer$>$...$<$/answer$>$ tag and its reasoning process within the $<$reason$>$...$<$/reason$>$ tag. We implement this difference through prompt engineering on the input tokens. The loss therefore covers both the gap in the reasoning process and the gap in the answer.", + "bbox": [ + 513, + 161, + 913, + 272 + ], + "page_idx": 4 + },
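A minimal sketch of how the two supervised targets of Secs. 3.3.1 and 3.3.2 could be serialized for next-token-prediction training. Only the <answer> and <reason> tags are specified in the text; the tag ordering and the example fields below are assumptions for illustration.

```python
def answer_only_target(answer: str) -> str:
    """Sec. 3.3.1: the model is trained to emit just "Yes" or "No"."""
    assert answer in ("Yes", "No")
    return answer

def reasoning_target(answer: str, reason: str) -> str:
    """Sec. 3.3.2: reasoning inside <reason> tags, answer inside <answer> tags."""
    return f"<reason>{reason}</reason>\n<answer>{answer}</answer>"

# Hypothetical training example (not from the released dataset).
example = {
    "question": "Is the person's hand free of deformation throughout the video?",
    "answer": "No",
    "reason": "The left hand shows an extra finger between frames 2 and 3.",
}
print(answer_only_target(example["answer"]))
print(reasoning_target(example["answer"], example["reason"]))
```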
+ { + "type": "text", + "text": "3.3.3 GRPO Training. We employ GRPO [23] to enhance reasoning inference performance, exploring two protocols: (i) Zero-GRPO, which relies solely on reinforcement learning without initial supervised data; and (ii) GRPO with cold-start Supervised Fine-Tuning, which combines initial supervised learning with subsequent reinforcement optimization.", + "bbox": [ + 513, + 279, + 916, + 362 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Zero-GRPO. Zero-GRPO is an exploratory attempt that is initiated directly from Qwen-2.5-VL [1] and uses RL to implicitly improve reasoning abilities without annotated reasons. For each video/question pair, we first sample a group of outputs $\\{o_1,o_2,\\dots,o_G\\}$ from the old policy $\\pi_{\\theta_{old}}(o_i|v,q)$ , where $v$ denotes the video that needs to be evaluated and $q$ denotes the question for each entity and dimension. Then we update the policy model $\\pi_{\\theta}$ by minimizing the following loss.", + "bbox": [ + 513, + 369, + 916, + 467 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal{L}_{GRPO}(\\theta) = - \\mathbb{E} [ q \\sim P(Q), \\{o_{i}\\}_{i = 1}^{G} \\sim \\pi_{\\theta_{old}}(O | v, q) ] \\\\ \\frac{1}{G} \\sum_{i = 1}^{G} \\left(\\min \\left(\\frac{\\pi_{\\theta}(o_{i} | v, q)}{\\pi_{\\theta_{old}}(o_{i} | v, q)} \\cdot Adv_{i}, \\right. \\right. \\\\ \\left. \\operatorname{clip} \\left(\\frac{\\pi_{\\theta}(o_{i} \\mid v, q)}{\\pi_{\\theta_{old}}(o_{i} \\mid v, q)}, 1 - \\epsilon, 1 + \\epsilon\\right) \\cdot Adv_{i}\\right) \\tag{6} \\\\ \\left. + \\beta \\mathbb{D}_{KL} \\left(\\pi_{\\theta} \\| \\pi_{ref}\\right)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 470, + 913, + 599 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb{D}_{KL} \\left(\\pi_{\\theta} \\| \\pi_{ref}\\right) = \\frac{\\pi_{ref} \\left(o_{i} | v, q\\right)}{\\pi_{\\theta} \\left(o_{i} | v, q\\right)} - \\log \\frac{\\pi_{ref} \\left(o_{i} | v, q\\right)}{\\pi_{\\theta} \\left(o_{i} | v, q\\right)} - 1 \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 611, + 913, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\beta$ denotes the coefficient of the Kullback-Leibler divergence [15] between the base model and the policy model, and $\\epsilon$ denotes the clipping threshold. $Adv_{i}$ is the advantage, which is the normalization of a group of rewards $\\{r_1,r_2,\\dots,r_G\\}$ computed from the outputs within each group:", + "bbox": [ + 513, + 645, + 915, + 702 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nAdv_{i} = \\frac{r_{i} - \\mathrm{Mean} \\left\\{r_{1}, r_{2}, \\dots, r_{G}\\right\\}}{\\mathrm{Std} \\left\\{r_{1}, r_{2}, \\dots, r_{G}\\right\\}} \\tag{8}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 710, + 913, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$r_i$ is composed of two reward functions:", + "bbox": [ + 514, + 743, + 759, + 756 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_{i} = r_{\\text{accuracy}_{i}} + r_{\\text{format}_{i}} \\tag{9}\n$$\n", + "text_format": "latex", + "bbox": [ + 633, + 772, + 913, + 787 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_{\\text{accuracy}_{i}} = \\left\\{ \\begin{array}{ll} 1.0 & \\text{if } answer_{i} = GT_{i} \\\\ 0.0 & \\text{else} \\end{array} \\right. \\tag{10}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 791, + 913, + 827 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_{\\text{format}_{i}} = \\left\\{ \\begin{array}{ll} 1.0 & \\text{if } o_{i} \\text{ includes correct format} \\\\ 0.0 & \\text{else} \\end{array} \\right. \\tag{11}\n$$\n", + "text_format": "latex", + "bbox": [ + 571, + 830, + 913, + 866 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Correct format means the output $o_i$ contains two tags:", + "bbox": [ + 514, + 868, + 844, + 881 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$<\\text{answer}>\\ldots</\\text{answer}>$ and $<\\text{reason}>\\ldots</\\text{reason}>$", + "bbox": [ + 531, + 882, + 898, + 895 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The \"Yes\" or \"No\" token only appears within the answer tag, and the reasoning process only appears within the reason tag.", + "bbox": [ + 83, + 106, + 480, + 136 + ], + "page_idx": 5 + },
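The reward and advantage terms of Eqs. 8-11 can be sketched directly. This is an illustrative reimplementation rather than the training code; the tag-checking regular expressions and the small epsilon guard on the standard deviation are assumptions about practical details.

```python
import re
import numpy as np

def format_reward(output: str) -> float:
    """Eq. 11: 1.0 if the output carries one <reason> block and one <answer> block."""
    has_reason = re.search(r"<reason>.+?</reason>", output, re.DOTALL) is not None
    has_answer = re.search(r"<answer>\s*(Yes|No)\s*</answer>", output) is not None
    return 1.0 if (has_reason and has_answer) else 0.0

def accuracy_reward(output: str, gt: str) -> float:
    """Eq. 10: 1.0 if the extracted answer matches the ground-truth label."""
    m = re.search(r"<answer>\s*(Yes|No)\s*</answer>", output)
    return 1.0 if (m is not None and m.group(1) == gt) else 0.0

def group_advantages(rewards: list[float], eps: float = 1e-8) -> np.ndarray:
    """Eq. 8: normalize a group of rewards by their mean and standard deviation."""
    r = np.asarray(rewards, dtype=np.float64)
    return (r - r.mean()) / (r.std() + eps)

# Toy sampled group of two outputs for one question whose ground truth is "No".
group = [
    "<reason>The hand melts into the cup.</reason><answer>No</answer>",
    "<answer>Yes</answer>",
]
rewards = [accuracy_reward(o, "No") + format_reward(o) for o in group]  # Eq. 9
print(rewards, group_advantages(rewards))
```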
+ { + "type": "text", + "text": "GRPO with cold-start Supervised Fine-Tuning. DeepSeek-R1 demonstrated that fine-tuning on an annotated dataset with reasoning processes before applying reinforcement learning (RL) yields better performance than directly using RL [10]. We adopt this approach in our supervised fine-tuning model. The sole difference between Zero-GRPO and GRPO with cold-start Supervised Fine-Tuning lies in the base model: the latter is initialized from a model pre-trained on annotated data containing reasoning processes.", + "bbox": [ + 81, + 141, + 482, + 253 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 83, + 263, + 218, + 279 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Datasets and Evaluation Metrics", + "text_level": 1, + "bbox": [ + 83, + 284, + 387, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.1 Datasets. We split 185 generated videos (around $5\\%$ of the whole data) with 3.5k entity-level questions from 5 distinct quality assessment dimensions to formulate our FingER-test dataset. Regarding the public benchmarks, we adopt the popular GenAI-Bench [17] and the recently released MonetBench [37] for performance evaluation. GenAI-Bench contains 800 unique text prompts paired with 4 T2V models, and each generated video has MOS (Mean Opinion Scores) annotated by 3 annotators. MonetBench consists of 1000 different text prompts, each paired with 2 T2V models. Each pair of videos is generated with the same prompt but different video generation models. MonetBench annotates the video pair with human preferences, including \"win\", \"lose\", and \"tie\" options.", + "bbox": [ + 81, + 301, + 482, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.2 Evaluation Metrics. We report the accuracy (Acc) of \"Yes\" or \"No\" answers, the Pearson linear correlation coefficient (PLCC), and the Spearman rank correlation coefficient (SRCC) on our proposed FingER-test dataset. We evaluate our models with and without token probability calculation, denoted by $(w/o~\\text{prob})$ and $(w/~\\text{prob})$ in Tab. 1 and Tab. 2. Following previous works [11, 19], we utilize the SRCC and the PLCC for evaluating the model's performance on GenAI-Bench.
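For reference, both correlation coefficients can be computed with scipy; the snippet below only illustrates the metric definitions on toy numbers and is unrelated to the actual benchmark data.

```python
import numpy as np
from scipy.stats import spearmanr, pearsonr

model_scores = np.array([0.82, 0.41, 0.65, 0.90, 0.30])  # toy predicted scores
human_mos    = np.array([3.8,  2.1,  3.0,  4.0,  1.9])   # toy human ratings (MOS)

srcc, _ = spearmanr(model_scores, human_mos)  # Spearman rank correlation (SRCC)
plcc, _ = pearsonr(model_scores, human_mos)   # Pearson linear correlation (PLCC)
print(f"SRCC={srcc:.3f}, PLCC={plcc:.3f}")
```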
And we use pairwise accuracy as the metrics for human preference evaluation on MonetBench and report tau and diff, followed [9, 43].", + "bbox": [ + 81, + 474, + 482, + 613 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Implementation Details", + "text_level": 1, + "bbox": [ + 83, + 626, + 321, + 641 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Based on Qwen-2.5-VL-7B [1], we fine-tune our model with the following experiment settings: learning rate of $5.0\\mathrm{e - 6}$ , global batch size of 32, video input fps (frame-per-second) is set to 2, and video maximum input resolution is set to $448\\times 448$ pixels. We utilize LLaMA-Factory [44] as our supervised fine-tuning (SFT) codebase. We perform SFT on our proposed FingER-Instruct-60k dataset for 2 epochs with 8 NVIDIA H20 GPUs, and the training steps are the same for the model trained with extra reasoning process. As for the settings of our reinforcement learning (RL) experiments, we employ Huggingface-TRL [31] as our RL fine-tuning tool with following hyper-parameters to implement GRPO: $\\beta = 0.04$ , and the number of group $G = 16$ , $\\epsilon = 0.2$ , $\\mu = 1$ , the initial learning rate of RL is $5.0\\mathrm{e - 7}$ . We train Zero-GRPO and GRPO with cold-start for 2k steps on 4 NVIDIA H20 GPUs.", + "bbox": [ + 81, + 643, + 482, + 837 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 Zero-shot Performance on FingER-test", + "text_level": 1, + "bbox": [ + 83, + 849, + 441, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We report the zero-shot performance of Qwen2.5-VL across five dimensions on our dataset. Through ablations on resolution, frame", + "bbox": [ + 83, + 867, + 480, + 896 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f6db93766cb5f466189876315472945b0af1547224588b7f6baaa83e5488d4b6.jpg", + "image_caption": [ + "Figure 3: Zero-shot performance on five distinct assessment dimensions with different input resolution and fps." + ], + "image_footnote": [], + "bbox": [ + 517, + 102, + 911, + 238 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "rate (fps), and evaluation granularity, we reveal the capabilities of the base model to handle different dimensions, and further demonstrate the crucial importance of integrating entity-level evaluation.", + "bbox": [ + 511, + 314, + 913, + 356 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Increasing resolution and fps leads to slight improvements. Fig. 3 illustrates the accuracy across five dimensions when prompted with entity-level questions. We can see that the accuracy curves show slight improvements with increasing resolutions or frame rates (fps), albeit at a significant computational cost. These results suggest that resolution and fps are not the primary factors of performance enhancement. Consequently, for efficiency we adopt $448 \\times 448$ pixels and 2 fps as the default settings for subsequent zero-shot and supervised fine-tuning (SFT) experiments.", + "bbox": [ + 511, + 356, + 913, + 479 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Performance varies significantly across different dimensions. As shown in Fig. 3, the zero-shot accuracy for visual quality is exceptionally low at $26.1\\%$ , while factual consistency achieves $57.6\\%$ . In contrast, dimensions like text alignment show higher accuracy at $80.59\\%$ , likely due to the base model's inherent capabilities from pre-training on caption data. 
We believe that the notably low accuracy in visual quality is primarily attributed to misalignment from AI-generated videos, and the main challenges still lie in dimensions requiring in-depth reasoning, such as factual consistency, temporal consistency, and text alignment, which will be further demonstrated in the following section.", + "bbox": [ + 511, + 481, + 913, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Integrating entity-level evaluations brings a substantial performance gain. To validate the efficacy of our entity-level QA framework, we conduct experiments across three evaluation granularities: overall level, dimension level, and our proposed entity level, as detailed in Tab. 1. The overall level (1st row) prompts the model with an overall assessment rating from 1 to 4, accompanied by detailed evaluation criteria, while the dimension level (2nd row) prompts model to rate each dimension from 1 to 4, which are then averaged to get a final score. The results of our proposed entity-level (3rd and 4th rows) are reported with and without a probability calculation strategy introduced in Sec. 3.1, and furthermore, we instruct the model to provide explanatory reasoning along with answers (last two rows). Compared to the entity-level framework, both the overall and dimension levels exhibit substantial performance degradation across all dimensions, indicating that fine-grained evaluation substantially enhances the model's performance. It is worth noting that incorporating explanatory reasoning does not bring improvements, revealing the inherent limitations of the base model in understanding AI-generated videos.", + "bbox": [ + 511, + 633, + 913, + 896 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/285e12cf77b75a7f0f08e2efaecc08f1bfa175f1b9a591e9fcbf7e2221583f34.jpg", + "table_caption": [ + "Table 1: Correlation between model Zero-shot answer and human reference on FingER-test" + ], + "table_footnote": [], + "table_body": "
MethodVisual QualityTemporalDynamic DegreeText AlignmentFactualOverall
Qwen2.5-VLAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCC
Overall Level------/30.68/29.27
Dimension Level-/35.06/35.54-/16.05/17.06-/14.81/14.09-/33.68/32.62-/13.86/12.28-/52.32/61.14
Entity (w/o prob)25.33/1.85/5.2278.72/83.26/83.9172.87/51.04/48.9881.6/70.68/73.4458.34/51.03/53.2766.50/80.86/83.71
Entity (w/ prob)25.33/40.60/40.9478.72/84.51/85.4472.87/56.48/56.8581.6/74.09/76.4958.34/57.45/58.6766.50/81.23/85.26
+Reason (w/o prob)45.71/49.97/49.6177.65/83.12/83.8975.21/54.30/52.8781.08/73.24/75.3140.51/17.43/23.5563.96/73.40/79.15
+Reason (w/ prob)45.71/46.29/49.6477.65/84.60/83.8975.21/48.88/52.8081.08/72.38/75.3540.51/29.27/23.5063.96/73.29/79.18
", + "bbox": [ + 86, + 132, + 908, + 250 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d9b02a1b058829ef36e4a6b35714adc0c10256dc2c1d78a1cda461ad9c1d4ccf.jpg", + "table_caption": [ + "Table 2: Correlation between SFT/RL model answer and human reference on FingER-test (Z-GRPO means Zero-GRPO)" + ], + "table_footnote": [], + "table_body": "
MethodVisual QualityTemporalDynamic DegreeText AlignmentFactualOverall
Acc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCCAcc/SRCC/PLCC
GPT-4o [13]62.19/56.24/57.9377.83/78.64/79.1368.31/54.14/57.0283.41/72.20/74.3358.77/48.93/49.5169.92/81.25/82.36
VideoScore [11]-/22.80/18.55-/23.84/26.06-/9.49/7.18-/19.18/13.87-/22.93/18.31-/20.39/17.68
Qwen2.5-VL [1]25.33/40.60/40.9478.72/84.51/85.4472.87/56.48/56.8581.6/74.09/76.4958.34/57.45/58.6766.50/81.23/85.26
Z-GRPO (w/o prob)76.01/73.39/70.4678.01/83.13/83.8277.93/69.74/68.4784.46/73.80/75.9955.21/47.47/50.3374.51/83.46/86.56
Z-GRPO (w/ prob)76.01/71.83/71.9778.01/81.81/83.8677.93/67.49/68.5184.46/74.38/76.2855.21/42.21/50.1574.51/83.24/86.82
FingER (w/o prob)83.78/83.48/82.5383.33/83.13/83.7083.23/71.37/67.9582.77/70.94/73.7572.89/64.12/64.6181.25/88.87/89.67
FingER (w/ prob)83.78/85.31/85.2283.33/86.24/86.9983.23/77.07/74.7382.77/73.85/77.9872.89/70.99/69.2681.25/90.23/91.41
+Reason (w/o prob)84.05/81.51/81.0084.04/85.88/86.6382.49/69.22/68.2286.79/77.87/79.7774.03/67.47/68.4182.33/89.79/91.64
+Reason (w/ prob)84.05/83.85/83.8784.04/86.51/87.0982.49/76.11/76.7086.79/79.34/83.1674.03/71.70/70.2782.33/90.31/92.04
+GRPO (w/o prob)82.30/80.62/78.0982.98/85.08/85.5781.63/65.54/64.9285.88/75.74/77.9174.04/68.65/70.7381.41/89.26/91.25
+GRPO (w/ prob)82.30/83.76/83.5182.98/86.64/87.4381.63/75.05/74.6885.88/78.32/82.6374.04/71.87/72.0381.41/90.43/92.41
", + "bbox": [ + 86, + 292, + 908, + 473 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c31a7b2de430267fedcf4b392ea6c547efde7a16d17b2e3d84f8d6eb0d7393c5.jpg", + "table_caption": [ + "Table 3: Zero-shot Evaluation Results on Public Benchmarks" + ], + "table_footnote": [], + "table_body": "
MethodGenAI-Bench[17]MonetBench[37]
SRCCPLCCtaudiff
GPT-4o[13]35.7936.6145.7048.30
Qwen2.5-VL[1]46.6244.2946.7044.27
VideoScore[11]42.2240.6249.1054.90
VQAScore[19]52.7050.6056.1059.50
Zero-GRPO49.5844.3951.3051.34
FingER54.1352.6053.9057.31
+ Reason56.6857.2557.8062.07
+ GRPO57.0356.5958.0062.80
", + "bbox": [ + 86, + 518, + 475, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4 SFT and RL Performance on FingER-test", + "text_level": 1, + "bbox": [ + 81, + 724, + 455, + 739 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we report the performance of our reasoning model on FingER-test using different training protocols including SFT with answers, SFT with reasons, zero GRPO, and GRPO with a cold start, we also provide results using the closed-source model GPT-40 and VideScore [11] for comparisons, as detailed in Tab. 2. Note that all these results, except for VideoScore [11], are obtained by entity-level evaluations for fair comparisons.", + "bbox": [ + 81, + 742, + 480, + 840 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our model, trained with only answers, demonstrates significant performance improvements over the base model, achieving overall gains of 14.75/9.00/6.15 in Acc/SRCC/PLCC, respectively. Substantial improvements are observed in the dimensions of visual quality,", + "bbox": [ + 81, + 840, + 482, + 896 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "dynamic degree, and factual consistency. Note that the improvement in the text alignment dimension is limited, mainly due to its inherent capabilities derived from pre-training data.", + "bbox": [ + 511, + 494, + 913, + 535 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Incorporating additional reasoning during training further boosts the performance, particularly in the dimensions of text alignment, factual consistency, and temporal consistency. For the text alignment dimension, the SFT with reasoning harvests performance gains with 4.02/5.49/5.18 in Acc/SRCC/PLCC. These improvements underscore the importance of in-depth video understanding to achieve higher performance in these dimensions.", + "bbox": [ + 511, + 535, + 913, + 632 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further investigate the reasoning training using RL, which includes two kinds of training procedures: (1) Zero-GRPO, and (2) GRPO initialized with a cold-start from reasoning SFT training. The results presented in Table 2 reveal that Zero-GRPO fails to predict correct answers. Upon closer examination of the training process, we identified that the issue stems from the reasoning component. Zero-GRPO generates reasons that resemble captions rather than logical reasoning. In contrast, when GRPO is applied with a cold-start initialization from our reasoning SFT model, it is able to surpass the SFT model with only 1k additional training steps. Among these dimensions, we observed steady performance improvements in the temporal and factual consistency dimensions, with boosts of $1.15 / 0.88 / 2.77$ in factual consistency. 
We believe that the reasoning cold-start teaches the model to reason in a rough manner, while GRPO guides it towards adopting reasons with correct answers, thereby incentivizing the reasoning capability in the model.", + "bbox": [ + 511, + 632, + 913, + 866 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Moreover, we evaluate the performance on our proposed FingER-test dataset with the closed-source MLLM [13] (1st row) and VideoScore", + "bbox": [ + 511, + 867, + 913, + 895 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Text Prompt: The camera follows a person standing alone by the lake, gazing at the distant sunset, with their reflection mirrored on the water's surface.", + "bbox": [ + 91, + 102, + 696, + 112 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/54976c59ce4846e46e0766e44bd28c157ead9e397c4980a5824be6ab946e240b.jpg", + "image_caption": [ + "Figure 4: Qualitative results. We show several reasoning results outputted by our GRPO model." + ], + "image_footnote": [], + "bbox": [ + 86, + 112, + 911, + 554 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "[11] (2nd row); our proposed FingER outperforms those methods by a large margin across all five assessment dimensions.", + "bbox": [ + 81, + 604, + 482, + 632 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5 Comparison on Public Benchmarks", + "text_level": 1, + "bbox": [ + 81, + 669, + 416, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Tab. 3 demonstrates the consistent improvements achieved by our method on two public benchmarks. We compare our method with GPT-4o, Qwen2.5-VL, and two other approaches. Specifically, with only Yes/No answer prediction, we already outperform all methods on GenAI-Bench, indicating the effectiveness of our fine-grained evaluation framework. Training with reasons and GRPO with a cold-start leads to further improvements, with a final $8.21\\% / 11.83\\%$ SRCC/PLCC relative performance boost. On MonetBench, without any weight fitting (we simply average the scores of the five dimensions), our method is able to achieve $3.39\\% / 5.55\\%$ relative improvements in tau/diff. It is worth noting that VideoScore [11] is trained using 37.6k training videos, while VQAScore [19] utilizes 665k samples; we outperform these methods with only 3.3k training videos and no additional training samples from other sources, which is at most one-tenth of the training size adopted by other methods.", + "bbox": [ + 81, + 688, + 482, + 896 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 514, + 603, + 638, + 616 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we emphasize the critical importance of integrating fine-grained reasoning into AI-generated video quality assessment, and we propose FingER, an entity-level fine-grained quality assessment framework with five distinct evaluation dimensions for AI-generated videos. To bridge the gap between non-AI videos and AI-generated videos, we construct a high-quality dataset, FingER-Instruct-60k, which consists of 3.3k videos generated by modern T2V models and 60k entity-level question/answering/reasoning pairs. Based on this dataset, we explore multiple training protocols to best incentivize the model's reasoning capability, including reasoning SFT, Zero-GRPO, and GRPO with a reasoning cold-start.
Extensive experiments demonstrate that by utilizing GRPO training with a cold-start, our method not only achieves the best performance on our dataset, but also outperforms other methods and closed-source models on two public benchmarks. And it is worth noting that we achieve SOTA performance with only 3.3k training samples.", + "bbox": [ + 511, + 621, + 916, + 857 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 104, + 176, + 119 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923 (2025).", + "[2] Fan Bao, Chendong Xiang, Gang Yue, Guande He, Hongzhou Zhu, Kaiwen Zheng, Min Zhao, Shilong Liu, Yaole Wang, and Jun Zhu. 2024. Vudu: a highly consistent, dynamic and skilled text-to-video generator with diffusion models. arXiv preprint arXiv:2405.04233 (2024).", + "[3] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. 2024. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 17682-17690.", + "[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. 2024. Video generation models as world simulators. 2024. URL https://openai.com/research/video-generation-models-as-world-simulators-3 (2024), 1.", + "[5] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 24185-24198.", + "[6] Jaemin Cho, Yushi Hu, Jason M Baldridge, Roopal Garg, Peter Anderson, Ranjay Krishna, Mohit Bansal, Jordi Pont-Tuset, and Su Wang. 2024. Davidsonian Scene Graph: Improving Reliability in Fine-grained Evaluation for Text-to-Image Generation. In ICLR.", + "[7] Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning. arXiv preprint arXiv:2504.02546 (2025).", + "[8] Xiangxiang Chu, Limeng Qiao, Xinyu Zhang, Shuang Xu, Fei Wei, Yang Yang, Xiaofei Sun, Yiming Hu, Xinyang Lin, Bo Zhang, et al. 2024. MobilevIm v2: Faster and stronger baseline for vision language model. arXiv preprint arXiv:2402.03766 (2024).", + "[9] Daniel Deutsch, George Foster, and Markus Freitag. 2023. Ties Matter: Meta-Evaluating Modern Metrics with Pairwise Accuracy and Tie Calibration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 12914-12929.", + "[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025).", + "[11] Xuan He, Dongfu Jiang, Ge Zhang, Max Ku, Achint Soni, Sherman Siu, Haonan Chen, Abhranil Chandra, Ziyan Jiang, Aaran Arulraj, et al. 2024. VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation. 
In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 2105-2123.", + "[12] Hailang Huang, Yong Wang, Zixuan Huang, Huaqiu Li, Tongwen Huang, Xi-angxiang Chu, and Richong Zhang. 2024. MMGenBench: Evaluating the Limits of LMMs from the Text-to-Image Generation Perspective. arXiv preprint arXiv:2411.14062 (2024).", + "[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. GPT-4o System Card. arXiv preprint arXiv:2410.21276 (2024).", + "[14] Haoning Wu Xintao Wang Yixiao Ge Xiaodong Cun David Junhao Zhang Jia-Wei Liu Yuchao Gu Rui Zhao Weisi Lin Wynne Hsu Ying Shan Jay Zhangjie Wu, Guian Fang and Mike Zheng Shou. 2024. Towards A Better Metric for Text-to-Video Generation. arXiv:2401.07781 (2024).", + "[15] Solomon Kullback. 1951. Kullback-leibler divergence.", + "[16] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. 2024. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629 (2024).", + "[17] Baiqi Li, Zhiqiu Lin, Deepak Pathak, Jiayao Emily Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. GenAI-bench: A holistic benchmark for compositional text-to-visual generation. In Synthetic Data for Computer Vision Workshop@ CVPR 2024.", + "[18] Mingxing Li, Rui Wang, Lei Sun, Yancheng Bai, and Xiangxiang Chu. 2025. Next Token Is Enough: Realistic Image Quality and Aesthetic Scoring with Multimodal Large Language Model. arXiv preprint arXiv:2503.06141 (2025).", + "[19] Zhiqiu Lin, Deepak Pathak, Baiqi Li, Jiayao Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. Evaluating text-to-visual generation with image-to-text generation. In European Conference on Computer Vision. Springer, 366–384.", + "[20] Xinrang Ling, Chen Zhu, Meiqi Wu, Hangyu Li, Xiaokun Feng, Cundian Yang, Aiming Hao, Jiashu Zhu, Jiahong Wu, and Xiangxiang Chu. 2025. VMBench: A Benchmark for Perception-Aligned Video Motion Generation. arXiv preprint arXiv:2503.10076 (2025)." + ], + "bbox": [ + 86, + 122, + 482, + 877 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[21] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783 (2025).", + "[22] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025).", + "[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseemath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024).", + "[24] Genmo Team. 2024. Mochi 1. https://github.com/genmoai/models.", + "[25] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530 (2024).", + "[26] Zachary Teed and Jia Deng, 2020. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. 
Springer, 402-419.", + "[27] Zicheng Zhang Chunyi Li Haoning Wu Xiongkuo Min Guangtao Zhai Tengchuan Kou, Xiaohong Liu and Ning Liu. 2024. Subjective-aligned dataset and metric for text-to-video quality assessment. arXiv preprint arXiv:2403.11956 (2024).", + "[28] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2016. Improved techniques for training gans. Advances in neural information processing systems, 29 (2016).", + "[29] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2021. Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. arXiv preprint arXiv:2104.14806 (2021).", + "[30] Thomas Unterthiner, Sjoerd Van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. 2019. FVD: A new metric for video generation. ICLR 2019 Workshop DeepGenStruct (2019).", + "[31] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Galloudec. 2020. TRL: Transformer Reinforcement Learning. https://github.com/huggingface/trl.", + "[32] Bram Wallace, Meihua Dang, Rafael Rafailov, Linqi Zhou, Aaron Lou, Senthil Purushwalkam, Stefano Ermon, Caiming Xiong, Shafiq Joty, and Nikhil Naik. 2024. Diffusion model alignment using direct preference optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8228-8238.", + "[33] Yibin Wang, Zhiyu Tan, Junyan Wang, Xiaomeng Yang, Cheng Jin, and Hao Li. 2024. Lift: Leveraging human feedback for text-to-video model alignment. arXiv preprint arXiv:2412.04814 (2024).", + "[34] Yibin Wang, Yuhang Zang, Hao Li, Cheng Jin, and Jiaqi Wang. 2025. Unified Reward Model for Multimodal Understanding and Generation. arXiv preprint arXiv:2503.05236 (2025).", + "[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824-24837.", + "[36] Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Liang Liao, Chunyi Li, Yixuan Gao, Annan Wang, Erli Zhang, Wenxiu Sun, et al. 2024. Q-Align: Teaching LMMs for Visual Scoring via Discrete Text-Defined Levels. In International Conference on Machine Learning. PMLR, 54015-54029.", + "[37] Jiazheng Xu, Yu Huang, Jiale Cheng, Yuanming Yang, Jiajun Xu, Yuan Wang, Wenbo Duan, Shen Yang, Qunlin Jin, Shurun Li, et al. 2024. Visionreward: Fine-grained multi-dimensional human preference learning for image and video generation. arXiv preprint arXiv:2412.21059 (2024).", + "[38] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024).", + "[39] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. Advances in neural information processing systems 36 (2023), 11809-11822.", + "[40] Xuebo Liu XintaoWang Yong Zhang Haoxin Chen Yang Liu Tieyong Zeng Raymond Chan Yaofang Liu, Xiaodong Cun and Ying Shan. 2024. Evalcrafter: Benchmarking and evaluating large video generation models. (2024).", + "[41] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. 
LIMO: Less is More for Reasoning. arXiv preprint arXiv:2502.03387 (2025).", + "[42] Ailing Zeng, Yuhang Yang, Weidong Chen, and Wei Liu. 2024. The Dawn of Video Generation: Preliminary Explorations with SORA-like Models. arXiv preprint arXiv:2410.05227 (2024).", + "[43] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + ], + "bbox": [ + 517, + 108, + 913, + 883 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[44] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. LlamaFactory: Unified Efficient Fine-Tuning of $100+$ Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations). Association for Computational Linguistics, Bangkok, Thailand. http://arxiv.org/abs/2403.13372", + "[45] Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. 2024. Open-sora: Democratizing" + ], + "bbox": [ + 84, + 108, + 480, + 191 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "efficient video production for all. arXiv preprint arXiv:2412.20404 (2024).", + "[46] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. 2025. R1-Zero's \"Aha Moment\" in Visual Reasoning on a 2B Non-SFT Model. arXiv preprint arXiv:2503.05132 (2025).", + "[47] Jiashuo Yu Fan Zhang Chenyang Si Yuming Jiang Yuanhan Zhang Tianxing Wu Qingyang Jin Nattapol Chanpaisit Yaohui Wang Xinyuan Chen Limin Wang Dahua Lin Yu Qiao Ziqi Huang, Yinan He and Ziwei Liu. 2023. Vbench: Comprehensive benchmark suite for video generative models. (2023)." 
+ ], + "bbox": [ + 517, + 108, + 911, + 191 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_model.json b/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..19a100c6f55c5e26ded2c1e2097d146dfe8fbbbf --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_model.json @@ -0,0 +1,2134 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.10358v1 [cs.CV] 14 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.102, + 0.912, + 0.149 + ], + "angle": 0, + "content": "FingER: Content Aware Fine-grained Evaluation with Reasoning for AI-Generated Videos" + }, + { + "type": "text", + "bbox": [ + 0.193, + 0.158, + 0.271, + 0.173 + ], + "angle": 0, + "content": "Rui Chen" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.175, + 0.336, + 0.189 + ], + "angle": 0, + "content": "chenrui.chen@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.19, + 0.308, + 0.205 + ], + "angle": 0, + "content": "AMAP, Alibaba Group" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.206, + 0.281, + 0.22 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.468, + 0.158, + 0.532, + 0.173 + ], + "angle": 0, + "content": "Lei Sun" + }, + { + "type": "text", + "bbox": [ + 0.419, + 0.175, + 0.581, + 0.189 + ], + "angle": 0, + "content": "ally.sl@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.423, + 0.19, + 0.576, + 0.205 + ], + "angle": 0, + "content": "AMAP, Alibaba Group" + }, + { + "type": "text", + "bbox": [ + 0.451, + 0.206, + 0.549, + 0.22 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.726, + 0.158, + 0.808, + 0.175 + ], + "angle": 0, + "content": "Jing Tang" + }, + { + "type": "text", + "bbox": [ + 0.671, + 0.176, + 0.865, + 0.189 + ], + "angle": 0, + "content": "guangyu.tj@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.692, + 0.19, + 0.844, + 0.205 + ], + "angle": 0, + "content": "AMAP, Alibaba Group" + }, + { + "type": "text", + "bbox": [ + 0.719, + 0.206, + 0.817, + 0.22 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.332, + 0.232, + 0.398, + 0.248 + ], + "angle": 0, + "content": "Geng Li" + }, + { + "type": "text", + "bbox": [ + 0.267, + 0.248, + 0.463, + 0.263 + ], + "angle": 0, + "content": "xiaofeng/lg@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.288, + 0.264, + 0.441, + 0.279 + ], + "angle": 0, + "content": "AMAP, Alibaba Group" + }, + { + "type": "text", + "bbox": [ + 0.316, + 0.28, + 0.413, + 0.294 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.566, + 0.232, + 0.7, + 0.248 + ], + "angle": 0, + "content": "Xiangxiang Chu" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.248, + 0.757, + 0.262 + ], + "angle": 0, + "content": "chuxiangxiang.cxx@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.557, + 0.264, + 0.709, + 0.279 + ], + "angle": 0, + "content": "AMAP, Alibaba Group" + }, + { + "type": "text", + "bbox": [ + 0.585, + 0.28, + 0.682, + 0.294 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.303, + 0.158, + 0.317 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.321, + 
0.485, + 0.695 + ], + "angle": 0, + "content": "Recent advances in video generation have posed great challenges in the assessment of AI-generated content, particularly with the emergence of increasingly sophisticated models. The various inconsistencies and defects observed in such videos are inherently complex, making overall scoring notoriously difficult. In this paper, we emphasize the critical importance of integrating fine-grained reasoning into video evaluation, and we propose FingER, a novel entity-level reasoning evaluation framework that first automatically generates Fine-grained Entity-level questions, and then answers those questions by a Reasoning model with scores, which can be subsequently weighted summed to an overall score for different applications. Specifically, we leverage LLMs to derive entity-level questions across five distinct perspectives, which (i) often focus on some specific entities of the content, thereby making answering or scoring much easier by MLLMs, and (ii) are more interpretable. Then we construct a FingER dataset, consisting of approximately 3.3k videos and corresponding 60k fine-grained QA annotations, each with detailed reasons. Based on that, we further investigate various training protocols to best incentivize the reasoning capability of MLLMs for correct answer prediction. Extensive experiments demonstrate that a reasoning model trained using Group Relative Policy Optimization (GRPO) with a cold-start strategy achieves the best performance. Notably, our model surpasses existing methods by a relative margin of \\(11.8\\%\\) on GenAI-Bench and \\(5.5\\%\\) on Monet-Bench with only 3.3k training videos, which is at most one-tenth of the training samples utilized by other methods. Our code and dataset will be released soon." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.712, + 0.219, + 0.725 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.73, + 0.483, + 0.854 + ], + "angle": 0, + "content": "Recent advancements in Text-to-Video (T2V) generative models [2, 4, 45] have demonstrated significant progress in producing visually appealing and content-rich videos. For instance, post-Sora models such as Kling have shown the ability to generate high-resolution videos that closely adhere to textual prompts. However, these models often produce localized artifacts, inconsistencies, and violations of physical laws. These issues highlight the necessity for the development of robust and reliable quality assessment methods for AI-generated video content." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.855, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Early research on evaluating AI-generated videos has primarily relied on feature-based metrics, such as the Frechet Video Distance (FVD) [30] and optical flow-based methods like RAFT [26]. 
While" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.302, + 0.915, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.416, + 0.718, + 0.426 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.428, + 0.915, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.572, + 0.59, + 0.588, + 0.6 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.699, + 0.59, + 0.714, + 0.6 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.838, + 0.59, + 0.853, + 0.6 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.614, + 0.916, + 0.726 + ], + "angle": 0, + "content": "Figure 1: Advanced generation models often exhibit localized defects while maintaining overall visually appealing, as illustrated in (a), which requires fine-grained in-depth understanding. (b) and (c) show that even with detailed instructional prompts and entity-level questions, GPT-4o still fails to identify this hand deformation. (d) shows the effectiveness of our work by integrating reasoning model with fine-grained entity-level questions." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.757, + 0.915, + 0.897 + ], + "angle": 0, + "content": "these methods effectively assess overall visual quality and dynamic characteristics, they fall short in capturing nuanced aspects that require deeper semantic understanding and fine-grained reasoning. To address these limitations, recent studies have introduced MLLMs for more comprehensive evaluations. For example, VideoScore [11] proposes a framework that evaluates five distinct aspects of video quality using an MLLM to assign scores ranging from 1 to 4. VisionReward [37] aligns video generation with human perception by formulating predefined judgment questions and fine-tuning a video-based MLLM to compute weighted scores. Similarly, LiFT [33]" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.108, + 0.482, + 0.148 + ], + "angle": 0, + "content": "learns a reward model that provides reasons and scores across multiple aspects to align the generation model with human preferences. Despite these advancements, two key challenges persist:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.149, + 0.483, + 0.481 + ], + "angle": 0, + "content": "(i) Inadequacy of Fine-grained Video Reasoning: Although advanced generative models have significantly improved global visual quality by reducing issues such as blurriness and flickering, they still exhibit localized spatiotemporal inconsistencies, distortions, unnatural artifacts, and violations of physical laws, especially in scenarios involving complex motion or multiple entities. For instance, Fig 1(a) shows a video generated by Pixverse that, despite its high overall visual appeal, contains a noticeably deformed hand in a localized area. This example underscores the need for more fine-grained and context-aware reasoning capabilities in video understanding, moving beyond superficial visual pattern recognition to incorporate temporally grounded and semantically rich analysis. (ii) Domain Gap in AI-Generated Videos: Current state-of-the-art MLLMs struggle to capture the intrinsic characteristics of AI-generated videos, even with well-defined prompts. 
As illustrated in Fig 1(b) and (c), GPT-4o misidentifies the deformed hand in a video and assigns a high score based on misleading explanations. This issue is primarily attributed to a domain gap between the training data used by MLLMs and the unique features of AI-generated videos. In essence, AI-generated videos can deceive MLLMs in certain latent feature spaces. Bridging this gap requires a high-quality dataset of AI-generated videos. Moreover, developing strategies to enhance the generalization of MLLMs to AI-generated videos remains an open challenge." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.481, + 0.483, + 0.73 + ], + "angle": 0, + "content": "Inspired by the Question Generation and Answering (QG/A) framework [6] and recent reasoning works [7, 21, 22, 46] that demonstrate a significant self-emergence of complex cognitive reasoning abilities induced by Deepseek R1 [10], we argue that incorporating fine-grained reasoning abilities would significantly enhance the video quality assessment. In this paper, we propose FingER, a novel framework that first decomposes the overall evaluation into fine-grained entity-level questions and then answers these questions with corresponding scores by a reasoning model, which is fine-tuned on our high-quality dataset using GRPO with a cold-start initialization. Specifically, we employ five distinct aspects as defined in VideoScore [11], including text-to-video alignment, temporal consistency, factual consistency, dynamic degree, and visual quality. By deriving such fine-grained entity-level questions, our framework not only enables the model to explicitly focus on specific characteristics of certain entities, thereby facilitating a more fine-grained understanding, but also enhances interpretability through these structured QA pairs." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.731, + 0.483, + 0.882 + ], + "angle": 0, + "content": "Based on these questions, we prompted several strong MLLMs [13, 25] to provide answers. However, we observed that these models struggle to provide correct answers, particularly in aspects like factual consistency. As stated before, we attribute this to the lack of high-quality AI-generated video datasets and the inadequate reasoning capabilities of current models. Therefore, we curated a fine-grained AI-generated video reasoning dataset, FingER-Instruct-60k, which consists of \\(3.3\\mathrm{k}\\) AI-generated videos sourced from advanced generation models like Kling, Luma, Vidu, PixVerse, CogVideoX [38], etc. For each video, we generate fine-grained questions and annotate them with 'Yes/No'. To ease human labor and" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.108, + 0.915, + 0.232 + ], + "angle": 0, + "content": "also reduce potential errors, we leverage MLLMs to generate detailed reasoning explanations given each question and its answer. (Note that, while MLLMs often struggle to answer these questions correctly, they demonstrate higher possibilities of producing coherent reasoning when the answer is explicitly provided, suggesting the presence of underlying reasoning capabilities.) These generated reasons were subsequently re-checked and refined by human annotators to ensure accuracy and quality. At last, we collect 60k fine-grained QA annotations with high-quality detailed reasons." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.233, + 0.915, + 0.37 + ], + "angle": 0, + "content": "To enhance the video reasoning capabilities, we choose Qwen2.5-VL [1], and explore multiple training protocols on our dataset, including directly training with answers, training with reasons, zero GRPO training and GRPO training with a cold-start initialization. Our experiments reveal that integrating high-quality reasons can largely increase the performance along with the interpretability, and GRPO with cold-start can further enhance its performance, especially in dimensions that require in-depth understanding. We also test our reasoning model in a zero-shot manner on public benchmarks, and still consistently achieve state-of-the-art performance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.371, + 0.915, + 0.426 + ], + "angle": 0, + "content": "In summary, we propose an entity-level quality assessment framework with strong reasoning and generalization capabilities. To the best of our knowledge, our work is the first to introduce entity-level reasoning into the quality assessment of AI-generated videos." + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.427, + 0.831, + 0.439 + ], + "angle": 0, + "content": "Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.444, + 0.915, + 0.554 + ], + "angle": 0, + "content": "- Novel Evaluation Approach. We propose a novel evaluation approach FingER, designed for practical AI-generated video quality assessment. It comprises an entity-level question generation module and a video reasoning model that provides corresponding scores. By emphasizing fine-grained reasoning, our approach effectively addresses localized defects in AI-generated videos that require in-depth understanding and significantly enhances interpretability." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.555, + 0.915, + 0.635 + ], + "angle": 0, + "content": "- Fine-grained Reasoning Dataset. We present a new dataset for AI-generated video reasoning, containing 3.3k videos and 60k entity-level QA annotations sourced from advanced generation models. Each QA pair is annotated with detailed reasons. This dataset aims to further advance research in this field." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.637, + 0.915, + 0.72 + ], + "angle": 0, + "content": "- Enhanced Training Protocols. We explore multiple training protocols to enhance the fine-grained video reasoning capabilities of MLLMs. Notably, we are the first to introduce GRPO training into AI-generated video quality assessment, which proves to be highly effective in improving both reasoning and generalization abilities" + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.721, + 0.915, + 0.789 + ], + "angle": 0, + "content": "- **Strong Performance.** Extensive experiments demonstrate the effectiveness of our approach. We achieve state-of-the-art performance on public benchmarks using only one-tenth of the training videos, thereby highlighting the superior generalization capability of our model." 
+ }, + { + "type": "list", + "bbox": [ + 0.542, + 0.444, + 0.915, + 0.789 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.802, + 0.66, + 0.817 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.822, + 0.773, + 0.838 + ], + "angle": 0, + "content": "2.1 Video Quality Assessment" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.841, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Early approaches relied on feature-based metrics, such as Fréchet Video Distance (FVD) [30], Inception Score (IS) [28], and CLIPSim [29]. And benchmark works like EvalCrafter [40] and VBench [47] introduced comprehensive evaluation frameworks with 18 and 16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.46, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.462, + 0.104, + 0.914, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.473, + 0.916, + 0.503 + ], + "angle": 0, + "content": "Figure 2: The overview of our proposed FingER framework, including (a) the evaluation pipeline, (b) FingER-Instruct-60k dataset curation, and (c) GRPO training of our reasoning model." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.523, + 0.483, + 0.551 + ], + "angle": 0, + "content": "metrics, respectively. However, these methods fall short in assessing deep semantic understanding or aligning with human perception." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.551, + 0.483, + 0.759 + ], + "angle": 0, + "content": "With the rapid advancement of MLLMs [1, 5, 8, 25], increasing studies have explored to leverage their capabilities to facilitate image/video quality evaluation [6, 12, 18, 19, 36]. Inspired by DSG [6], which uses question generation/answering (QG/A) for interpretable assessment, T2VScore [14] adopted a QA framework for T2V alignment. T2VQA [27] introduced the T2VQA-DB dataset, comprising 10k videos annotated with Mean Opinion Scores (MOS), and trained a transformer-based model to predict these scores. Similarly, VideoScore [11] proposed a larger dataset across five dimensions and employed a MLLM for scoring. VMBench [20] introduced perception-aligned motion metrics to evaluate motion quality. While these methods predict scores or labels, they often overlook the reasoning behind assessments, limiting their effectiveness. Our work distinguishes itself by incorporating entity-level reasoning for evaluating advanced generation models reliably." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.759, + 0.483, + 0.855 + ], + "angle": 0, + "content": "Another line of research focuses on reward models for improving generative models via Reinforcement Learning from Human Feedback (RLHF), such as Diffusion-DPO [32], VisionReward [37] and UnifiedReward [34]. While these efforts target generative model optimization, our work emphasizes practical video quality evaluation, we expect it is able to further benefit the generation models using RLHF in future work." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.522, + 0.862, + 0.538 + ], + "angle": 0, + "content": "2.2 Reasoning Inference in Large Models" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.54, + 0.916, + 0.79 + ], + "angle": 0, + "content": "Reasoning inference aims to emulate human-like thinking processes by forming the final answer through a Large Language Model (LLM). 
Specifically, to answer a given question, an LLM is required to think divergently and record the thinking processes, which are subsequently referenced when formulating the final answer. This approach has inspired a variety of research, including prompting-based Chain-of-Thought (CoT) [35], planning-based Graph-of-Thought [3] and Tree-of-Thought [39] processing, reward methods [16], and supervised fine-tuning (SFT) datasets with sufficient context [41]. Notably, DeepSeek-R1 [10] integrates specific prompts with reinforcement learning (RL), enabling the model to first generate the thinking process before producing the final answer. This method allows for supervised fine-tuning with a small amount of annotated data containing thinking processes, followed by reinforcement learning fine-tuning on more data without thinking processes. A very recent approach [7] proposes a highly simplified reinforcement learning framework and demonstrates its validity across several benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.808, + 0.61, + 0.822 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.827, + 0.916, + 0.898 + ], + "angle": 0, + "content": "In this section, we first introduce our entity-level video quality assessment framework - FingER in Sec. 3.1. Then, we detail the data curation pipeline of our proposed dataset, namely FingER-Instruct-60k in Sec. 3.2. In the end, we combine multiple training methods with our proposed instruction tuning dataset, from the" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.486, + 0.136 + ], + "angle": 0, + "content": "basic supervised fine-tuning (SFT), to reasoning training with reinforcement learning (RL), as detailed in Sec. 3.3." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.185, + 0.365, + 0.202 + ], + "angle": 0, + "content": "3.1 Entity-level VQA Framework" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.204, + 0.483, + 0.37 + ], + "angle": 0, + "content": "For Text-to-Video (T2V) generation task, user input prompt is the only key instruction for generative models to understand and generate content that well-aligned with user's intent. To perform entity-level quality assessment of AI-generated videos, we start from understanding the user's prompt through extracting entities, attributes, and actions within itself. Inspired by DSG [6] in Text-to-Image (T2I) evaluation, we also utilize closed-source Large-Language-Model (LLM) to perform textual understanding and the following entity extraction. As shown in Fig. 4, we provide abundant in-context learning (ICL) [35] examples from different video generation scenarios and formulate the final input for GPT-4o [13], in which way we can harvest more steady entity extraction results." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.371, + 0.483, + 0.619 + ], + "angle": 0, + "content": "With entities extracted from the user's prompt, we generate entity-level questions from five distinct video quality assessment dimensions, including visual quality, text-to-video alignment, temporal consistency, factual consistency, and dynamic degree. For each dimension, we provide a detailed explanation followed by several key points, formulating the context information when prompting the LLM. We also prepare adequate entity-level in-context learning examples, which are summarized from videos with and without obvious artifacts or hallucinations. 
In this way, we can help the LLMs to better understand which question should be asked when coping with a specific entity along with the given assessment dimension. In short, we break down the granularity of fine-grained video quality assessment from multi-dimensional level to entity-level. And the intuition behind entity-level question generation is that we hope fine-grained question/answering can guide the MLLM to focus on understanding the correlation between entity-level textual description and its corresponding visual appearance based on the video content." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.62, + 0.483, + 0.897 + ], + "angle": 0, + "content": "After the entity-level question generation procedure, our fin-tuned MLLM answers the above questions with a simple \"Yes\" or \"No\", along with a detailed reasoning process explaining why the answer is that. Learning the logical reasoning process is critical for model performance improvements, as detailed in the experiment Sec. 4.4. The outputted reason can also be useful when conducting practical video quality assessment, which is more interpretable and user-friendly. To formulate a final score representing the overall quality of AI-generated videos, we start by calculating the probability of the answer token (\"Yes\" or \"No\") for each entity-level question to represent the entity-level score. Since there are multiple \"Yes\" and \"No\" with different formats but similar meanings in the vocabulary of our MLLM, we first gather the token set for \"Yes\" and \"No\". In this paper, we take [\"Yes\", \"yes\", \"YES\", \"Yes\", \"Yes\"] as the token set for answer \"Yes\", and [\"No\", \"no\", \"NO\", \"No\", \"No\"] for answer \"No\", denoted by \\( T_{Y} \\) and \\( T_{N} \\), respectively. With logits from the answer token, we extract all the logit whose token id is within the token set, and apply softmax over \\( T_{Y} \\cup T_{N} \\), as illustrated in Eq. 1. Then, given the entity-level question \\( q \\), we can get the answer's probability \\( P(No \\mid q) \\) and \\( P(Yes \\mid q) \\) with a simple sum" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.11, + 0.54, + 0.122 + ], + "angle": 0, + "content": "up." + }, + { + "type": "equation", + "bbox": [ + 0.59, + 0.121, + 0.913, + 0.164 + ], + "angle": 0, + "content": "\\[\nP (N o \\mid q) = \\sum_ {\\substack {i = 1 \\\\ m}} ^ {n} \\text {S o f t m a x} (x _ {i}), x _ {i} \\in T _ {N}; \\tag{1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.162, + 0.837, + 0.196 + ], + "angle": 0, + "content": "\\[\nP (Y e s \\mid q) = \\sum_ {j = 1} ^ {m} S o f t m a x (y _ {j}), y _ {j} \\in T _ {Y}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.198, + 0.916, + 0.337 + ], + "angle": 0, + "content": "Instead of directly using the derived probability as the entity-level score, we still need the judgment on whether the question is positive or negative. For example, given the question \"Do the attributes of the table in the video (such as size, shape, and material) align with real-world characteristics?\" from the factual consistency dimension, it is apparent that the factual consistency of the assessed video goes up with a positive \"Yes\" answer. We define this type of question as a positive one, and vice versa. We denote the status of an entity-level question with \\( q_{stat} \\), if \\( q_{stat} \\) equals 1, it means that the question is positive; otherwise, the question is negative." 
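The Yes/No token-probability scoring described above (Eq. 1–2) can be summarized in a short sketch. The snippet below is illustrative only: it assumes the answer-position logit vector and the gathered "Yes"/"No" token-id sets (T_Y, T_N) are already available from the MLLM's tokenizer, and the helper name `entity_score` is ours, not the paper's.

```python
import torch

def entity_score(logits: torch.Tensor, yes_ids: list[int], no_ids: list[int],
                 q_stat: int) -> float:
    """Sketch of Eq. 1-2: softmax restricted to T_Y ∪ T_N at the answer-token
    position, then keep the probability of the answer that indicates higher quality."""
    ids = torch.tensor(yes_ids + no_ids)
    probs = torch.softmax(logits[ids].float(), dim=-1)   # restricted softmax (Eq. 1)
    p_yes = probs[: len(yes_ids)].sum().item()           # P(Yes | q)
    p_no = probs[len(yes_ids):].sum().item()             # P(No | q)
    return p_yes if q_stat == 1 else p_no                # Eq. 2: positive vs. negative question
```

Per-dimension and overall scores are then obtained by (weighted) summation of these entity-level values, as formalized in Eq. 3 and Eq. 4 below.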
+ }, + { + "type": "equation", + "bbox": [ + 0.593, + 0.34, + 0.913, + 0.373 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {e n t i t y}} = \\left\\{ \\begin{array}{l l} P (N o \\mid q), & \\text {i f} q _ {\\text {s t a t}} = 0; \\\\ P (Y e s \\mid q), & \\text {i f} q _ {\\text {s t a t}} = 1. \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.377, + 0.916, + 0.558 + ], + "angle": 0, + "content": "With the aforementioned preparations setup, we propose our entity-level score \\( S_{entity} \\), which correlates positively with the quality of the assessed video. When the entity-level question is positive, we use the probability of the \"Yes\" answer \\( P(Yes \\mid q) \\) to represent the score it can gain. And we utilize the probability of the \"No\" answer \\( P(No \\mid q) \\) if the question is negative, as illustrated in Eq. 2. In short, our intuition behind this design is that as long as the video quality goes up with which answer, we calculate our entity-level score based on that answer's probability. Then, we utilize entity-level question/answering pairs that are under the same quality assessment dimension to formulate our dimension-level score \\( S_{dim} \\). To be specific, we simply calculate the linear summation of multiple answers' probability \\( S_{entity} \\), as illustrated in Eq. 3." + }, + { + "type": "equation", + "bbox": [ + 0.654, + 0.563, + 0.913, + 0.601 + ], + "angle": 0, + "content": "\\[\nS _ {d i m} = \\sum_ {i = 1} ^ {N} S _ {e n t i t y} i \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.605, + 0.913, + 0.634 + ], + "angle": 0, + "content": "In the end, we derive the overall-level score \\( S_{\\text{overall}} \\) with the weighted average of five distinct dimension scores \\( S_{dim} \\) in Eq. 4." + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.638, + 0.913, + 0.674 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {o v e r a l l}} = \\sum_ {i = 1} ^ {5} w _ {i}. S _ {\\text {d i m}} i \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.678, + 0.916, + 0.747 + ], + "angle": 0, + "content": "In short, we propose the entity-level VQA framework FingER, which consists of three parts: (i) entity-level question generation, (ii) the fine-tuned MLLM with reasoning output, and (iii) the hierarchical scoring function that converts token probability to multi-level scores." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.759, + 0.852, + 0.776 + ], + "angle": 0, + "content": "3.2 Entity-level Dataset with Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.778, + 0.916, + 0.806 + ], + "angle": 0, + "content": "In this section, we introduce the construction pipeline of our entity-level instruction tuning dataset, named FingER-Instruct-60k." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.813, + 0.916, + 0.897 + ], + "angle": 0, + "content": "3.2.1 Prompt and T2V Model Selection. Based on VideoGenEval [42] dataset, our instruction tuning dataset is composed of 420 diverse text prompts and 3.3k AI-generated videos produced by 8 modern T2V models, including closed-source models: Kling, Luma, PixVerse, Vidu, Qingying, and open-sourced models: Mochi-1 [24], CogVideoX [38], Open-Sora [45]. 
We utilize all 420 text" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.484, + 0.26 + ], + "angle": 0, + "content": "prompts from the T2V session [45], which cover a diverse range of complex scenarios, including human-centric activities, material and spatial relationships, as well as animal and text generations. These prompts are derived from real-life user inputs. As for the T2V model selection, we denote models that understand and obey most of the common sense and physical laws, and generate time-consistent videos without obvious temporal distortions as the high-quality model. We select the generative models uniformly based solely on the quality of their generated videos, spanning from high-quality models to average-quality models, for a more diverse training data distribution." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.275, + 0.484, + 0.427 + ], + "angle": 0, + "content": "3.2.2 Entity-level Question Generation and Annotation. Our multi-dimensional entity-level question generation starts with understanding users' input prompts and extracting the entities within. We use GPT-40 [13] for prompt understanding and entity extraction, with abundant in-context learning examples provided. Then, we perform the entity-level question generation for our five distinct assessment dimensions. For each entity, we prompt the LLM with task introduction, assessment dimension explanation with several key points to focus on, user's input prompt, the extracted entity, and the most important in-context learning examples. And we extract the generated questions with regular expression matching." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.427, + 0.484, + 0.496 + ], + "angle": 0, + "content": "For data annotation, we engaged 10 professional annotators to complete the task of annotating \\(60\\mathrm{k}\\) question/answer pairs. Inter-annotator agreement was ensured through multiple rounds of small-scale pilot annotations, and the entire process took approximately one month to complete." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.511, + 0.483, + 0.677 + ], + "angle": 0, + "content": "3.2.3 Reasoning Generation and Verification. We employ the powerful closed-source MLLM [25] to generate the initial version of the reasoning process. Specifically, we prompt the MLLM with the assessment dimension explanation, user prompt, in-context learning examples, and the entity-level question along with its human-annotated result. An interesting finding is that when the MLLM is provided with the correct answer to the entity-level question, the generated reasoning process for explaining the answer is more reasonable than when directly generating the answer and its reason. Rather than using the MLLM-generated reasoning process directly, we conduct thorough human verification to ensure the quality of our reasoning training data." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.677, + 0.484, + 0.733 + ], + "angle": 0, + "content": "With aforementioned entity-level questions, human-annoted answers and detailed reasoning process, we formulate our instruction tuning dataset FingER-Instruct-60k, which serves as our basis for the model training in the next section." 
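For concreteness, one FingER-Instruct-60k record might be organized as sketched below. The paper does not publish the exact schema, so every field name and value here is hypothetical; the dict only illustrates how an entity-level question, its human-annotated Yes/No answer, and the verified reasoning text fit together.

```python
# Hypothetical schema -- the released dataset may use different field names and paths.
example_record = {
    "video": "videos/pixverse/clip_0042.mp4",        # one of the 3.3k generated videos (illustrative path)
    "prompt": "A person stands by a lake, gazing at the distant sunset ...",
    "entity": "person",                               # entity extracted from the prompt by the LLM
    "dimension": "factual_consistency",               # one of the five assessment dimensions
    "question": "Do the person's hands keep an anatomically plausible shape throughout the video?",
    "q_stat": 1,                                       # 1 = positive question, 0 = negative
    "answer": "No",                                    # human-annotated Yes/No label
    "reason": "Midway through the clip the left hand deforms and shows six fingers, "
              "which violates real-world anatomy.",    # MLLM-drafted, human-verified explanation
}
```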
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.752, + 0.449, + 0.768 + ], + "angle": 0, + "content": "3.3 Instruction Tuning and GRPO Training" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.77, + 0.484, + 0.813 + ], + "angle": 0, + "content": "We use Qwen2.5-VL-7B-Instruct [1] as our base model and apply supervised fine-tuning, SFT with reasoning and reinforcement learning on it." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.827, + 0.484, + 0.897 + ], + "angle": 0, + "content": "3.3.1 Supervised Fine-Tuning. We directly train the base model on FingER-Instruct-60k, the response of model only contains \"Yes\" or \"No\" answer following the next token prediction paradigm. It means the model only needs to learn predicting the correct answer without any reasoning process. The loss function is Cross-Entropy" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.108, + 0.551, + 0.12 + ], + "angle": 0, + "content": "Loss:" + }, + { + "type": "equation", + "bbox": [ + 0.642, + 0.119, + 0.914, + 0.157 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {C E} = - \\sum_ {i = 1} ^ {N} y _ {i} \\log \\left(p _ {i}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.162, + 0.915, + 0.273 + ], + "angle": 0, + "content": "3.3.2 Supervised Fine-Tuning with Reasoning. We also train base model on FingER-Instruct-60k, but the difference compared to Supervised Fine-Tuning is the model needs to learn predicting the correct answer within \\(<\\) answer \\(>\\) ... \\(<\\) /answer \\(>\\) tag and its reasoning processes within \\(<\\) reason \\(>\\) ... \\(<\\) /reason \\(>\\) tag. We apply prompt engineering on the input tokens to reach this difference. The loss also contains the gap of reasoning processes and the gap of answers." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.28, + 0.917, + 0.363 + ], + "angle": 0, + "content": "3.3.3 GRPO Training. We employ GRPO [23] to enhance reasoning inference performance, exploring two protocols: (i) Zero-GRPO, which relies solely on reinforcement learning without initial supervised data; and (ii) GRPO with cold-start Supervised Fine-Tuning, which combines initial supervised learning with subsequent reinforcement optimization." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.37, + 0.917, + 0.468 + ], + "angle": 0, + "content": "Zero-GRPO. Zero-GRPO is an exploratory attempt that is initiated directly from Qwen-2.5-VL [1] and uses RL to implicitly improve reasoning abilities without annotated reason. For each video question pair, we first sample a group of outputs \\(\\{o_1,o_2,\\dots,o_G\\}\\) by old policy \\(\\pi_{\\theta_{old}}(o_i|v,q)\\), \\(v\\) denotes the video that needs to be evaluated, \\(q\\) denotes the question for each entity and dimension. Then update the policy model \\(\\pi_{\\theta}\\) by minimizing the following loss." + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.472, + 0.914, + 0.6 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {G R P O} (\\theta) = - \\mathbb {E} [ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {o l d}} (O | v, q) ] \\\\ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} | v , q)}{\\pi_ {\\theta o l d} (o _ {i} | v , q)} * A d v _ {i}, \\right. \\right. \\\\ \\left. 
\\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid v , q\\right)}{\\pi_ {\\theta o l d} \\left(o _ {i} \\mid v , q\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) * A d v _ {i}\\right) \\tag {6} \\\\ \\left. + \\beta \\mathbb {D} _ {K L} \\left(\\pi_ {\\theta} | | \\pi_ {r e f}\\right)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.551, + 0.612, + 0.914, + 0.644 + ], + "angle": 0, + "content": "\\[\n\\mathbb {D} _ {K L} \\left(\\pi_ {\\theta} \\| \\pi_ {r e f}\\right) = \\frac {\\pi_ {r e f} \\left(o _ {i} | v , q\\right)}{\\pi_ {\\theta} \\left(o _ {i} | v , q\\right)} - l o g \\frac {\\pi_ {r e f} \\left(o _ {i} | v , q\\right)}{\\pi_ {\\theta} \\left(o _ {i} | v , q\\right)} - 1 \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.646, + 0.916, + 0.703 + ], + "angle": 0, + "content": "\\(\\beta\\) denotes the coefficient of Kullback-Leibler Divergence [15] between base model and policy model, \\(\\epsilon\\) denotes the threshold of clip. \\(Adv_{i}\\) is the advantage which is the normalization of a group of rewards \\(\\{r_1,r_2,\\dots,r_G\\}\\) computed from outputs within each group:" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.712, + 0.914, + 0.743 + ], + "angle": 0, + "content": "\\[\nA d v _ {i} = \\frac {r _ {i} - M e a n \\left\\{r _ {1} , r _ {2} , \\dots , r _ {g} \\right\\}}{\\operatorname {S t d} \\left\\{r _ {1} , r _ {2} , \\dots , r _ {G} \\right\\}} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.744, + 0.76, + 0.757 + ], + "angle": 0, + "content": "\\(r_i\\) is composed of two reward functions:" + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.773, + 0.914, + 0.789 + ], + "angle": 0, + "content": "\\[\nr _ {i} = r _ {\\text {a c c u r a c y} _ {i}} + r _ {\\text {f o r m a t} _ {i}} \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.792, + 0.914, + 0.828 + ], + "angle": 0, + "content": "\\[\nr _ {\\text {a c c u r a c y} _ {i}} = \\left\\{ \\begin{array}{l l} 1. 0 & \\text {i f a n s w e r} _ {i} = G T _ {i} \\\\ 0. 0 & \\text {e l s e} \\end{array} \\right. \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.572, + 0.832, + 0.914, + 0.867 + ], + "angle": 0, + "content": "\\[\nr _ {\\text {f o r m a t} i} = \\left\\{ \\begin{array}{l l} 1. 0 & \\text {i f} o _ {i} \\text {i n c l u d e s c o r r e c t f o r m a t} \\\\ 0. 0 & \\text {e l s e} \\end{array} \\right. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.869, + 0.845, + 0.882 + ], + "angle": 0, + "content": "Correct format means the output \\(o_i\\) contains two tags:" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.883, + 0.9, + 0.896 + ], + "angle": 0, + "content": "\\(< \\text{answer}>\\ldots < / \\text{answer}>\\) and \\(< \\text{reason}>\\ldots < / \\text{reason}>\\)" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.107, + 0.482, + 0.137 + ], + "angle": 0, + "content": "\"Yes\" or \"No\" token only appears within the answer tag, and the reasoning process only appears within reason tag." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.142, + 0.483, + 0.254 + ], + "angle": 0, + "content": "GRPO with cold-start Supervised Fine-Tuning. DeepSeek-R1 demonstrated that fine-tuning on an annotated dataset with reasoning processes before applying reinforcement learning (RL) yields better performance than directly using RL [10]. We adopt this approach in our supervised fine-tuning model. 
The sole difference between Zero-GRPO and GRPO with cold-start Supervised Fine-Tuning lies in the base model: the latter is initialized from a model pre-trained on annotated data containing reasoning processes." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.265, + 0.22, + 0.28 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.285, + 0.388, + 0.299 + ], + "angle": 0, + "content": "4.1 Datasets and Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.303, + 0.483, + 0.471 + ], + "angle": 0, + "content": "4.1.1 Datasets. We split 185 generated videos (around \\(5\\%\\) of whole data) with 3.5k entity-level questions from 5 distinct quality assessment dimensions to formulate our FingER-test dataset. Regarding the public benchmarks, we adopt the popular GenAI-Bench [17] and recently released MonetBench [37] for performance evaluation. GenAI-Bench contains 800 unique text prompts paired with 4 T2V models, and each generated video has MOS (Mean Opinion Scores) annotated by 3 annotators. MonetBench consists of 1000 different text prompts, each paired with 2 T2V models. Each pair of videos is generated with the same prompt but different video generation models. MonetBench annotates the video pair with human preferences, including \"win\", \"lose\", and \"tie\" options." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.476, + 0.483, + 0.614 + ], + "angle": 0, + "content": "4.1.2 Evaluation Metrics. We report the accuracy (Acc) of \"Yes\" or \"No\" answers, the Pearson linear correlation coefficient (PLCC), and the Spearman rank correlation coefficient (SRCC) on our proposed FingER-test dataset. We evaluate our models with and without token probability calculation, denoted by \\((w / o\\text{prob})\\) and \\((w/\\text{prob})\\) in Tab. 1 and Tab. 2. Following previous works in [11, 19], we utilize the SRCC and the PLCC for evaluating model's performance on GenAI-Bench. And we use pairwise accuracy as the metrics for human preference evaluation on MonetBench and report tau and diff, followed [9, 43]." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.627, + 0.322, + 0.642 + ], + "angle": 0, + "content": "4.2 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.645, + 0.483, + 0.838 + ], + "angle": 0, + "content": "Based on Qwen-2.5-VL-7B [1], we fine-tune our model with the following experiment settings: learning rate of \\(5.0\\mathrm{e - 6}\\), global batch size of 32, video input fps (frame-per-second) is set to 2, and video maximum input resolution is set to \\(448\\times 448\\) pixels. We utilize LLaMA-Factory [44] as our supervised fine-tuning (SFT) codebase. We perform SFT on our proposed FingER-Instruct-60k dataset for 2 epochs with 8 NVIDIA H20 GPUs, and the training steps are the same for the model trained with extra reasoning process. As for the settings of our reinforcement learning (RL) experiments, we employ Huggingface-TRL [31] as our RL fine-tuning tool with following hyper-parameters to implement GRPO: \\(\\beta = 0.04\\), and the number of group \\(G = 16\\), \\(\\epsilon = 0.2\\), \\(\\mu = 1\\), the initial learning rate of RL is \\(5.0\\mathrm{e - 7}\\). We train Zero-GRPO and GRPO with cold-start for 2k steps on 4 NVIDIA H20 GPUs." 
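To make the GRPO recipe concrete, the sketch below shows how the rule-based rewards (Eq. 9–11) and the group-normalized advantages (Eq. 8) could be computed for the G sampled outputs of one video/question pair. The regular expressions and helper names are illustrative assumptions; the exact answer-extraction logic used in training is not specified.

```python
import re
import statistics

def format_reward(output: str) -> float:
    """Eq. 11 (simplified): 1.0 if both <answer>...</answer> and <reason>...</reason> are present."""
    ok = re.search(r"<answer>.*?</answer>", output, re.DOTALL) and \
         re.search(r"<reason>.*?</reason>", output, re.DOTALL)
    return 1.0 if ok else 0.0

def accuracy_reward(output: str, ground_truth: str) -> float:
    """Eq. 10: 1.0 if the extracted Yes/No answer matches the human label."""
    m = re.search(r"<answer>\s*(Yes|No)\s*</answer>", output, re.IGNORECASE)
    return 1.0 if m and m.group(1).lower() == ground_truth.lower() else 0.0

def group_advantages(outputs: list[str], ground_truth: str) -> list[float]:
    """Eq. 8-9: total reward per sampled output, normalized within the group of G samples."""
    rewards = [accuracy_reward(o, ground_truth) + format_reward(o) for o in outputs]
    mean, std = statistics.mean(rewards), statistics.pstdev(rewards)
    return [(r - mean) / (std + 1e-8) for r in rewards]
```

These advantages would then enter the clipped policy-gradient objective of Eq. 6 together with the KL penalty of Eq. 7.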
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.85, + 0.442, + 0.866 + ], + "angle": 0, + "content": "4.3 Zero-shot Performance on FingER-test" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.868, + 0.482, + 0.897 + ], + "angle": 0, + "content": "We report the zero-shot performance of Qwen2.5-VL across five dimensions on our dataset. Through ablations on resolution, frame" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.103, + 0.912, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.251, + 0.915, + 0.279 + ], + "angle": 0, + "content": "Figure 3: Zero-shot performance on five distinct assessment dimensions with different input resolution and fps." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.315, + 0.915, + 0.357 + ], + "angle": 0, + "content": "rate (fps), and evaluation granularity, we reveal the capabilities of the base model to handle different dimensions, and further demonstrate the crucial importance of integrating entity-level evaluation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.357, + 0.915, + 0.481 + ], + "angle": 0, + "content": "Increasing resolution and fps leads to slight improvements. Fig. 3 illustrates the accuracy across five dimensions when prompted with entity-level questions. We can see that the accuracy curves show slight improvements with increasing resolutions or frame rates (fps), albeit at a significant computational cost. These results suggest that resolution and fps are not the primary factors of performance enhancement. Consequently, for efficiency we adopt \\(448 \\times 448\\) pixels and 2 fps as the default settings for subsequent zero-shot and supervised fine-tuning (SFT) experiments." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.482, + 0.915, + 0.633 + ], + "angle": 0, + "content": "Performance varies significantly across different dimensions. As shown in Fig. 3, the zero-shot accuracy for visual quality is exceptionally low at \\(26.1\\%\\), while factual consistency achieves \\(57.6\\%\\). In contrast, dimensions like text alignment show higher accuracy at \\(80.59\\%\\), likely due to the base model's inherent capabilities from pre-training on caption data. We believe that the notably low accuracy in visual quality is primarily attributed to misalignment from AI-generated videos, and the main challenges still lie in dimensions requiring in-depth reasoning, such as factual consistency, temporal consistency, and text alignment, which will be further demonstrated in the following section." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.634, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Integrating entity-level evaluations brings a substantial performance gain. To validate the efficacy of our entity-level QA framework, we conduct experiments across three evaluation granularities: overall level, dimension level, and our proposed entity level, as detailed in Tab. 1. The overall level (1st row) prompts the model with an overall assessment rating from 1 to 4, accompanied by detailed evaluation criteria, while the dimension level (2nd row) prompts model to rate each dimension from 1 to 4, which are then averaged to get a final score. The results of our proposed entity-level (3rd and 4th rows) are reported with and without a probability calculation strategy introduced in Sec. 3.1, and furthermore, we instruct the model to provide explanatory reasoning along with answers (last two rows). 
Compared to the entity-level framework, both the overall and dimension levels exhibit substantial performance degradation across all dimensions, indicating that fine-grained evaluation substantially enhances the model's performance. It is worth noting that incorporating explanatory reasoning does not bring improvements, revealing the inherent limitations of the base model in understanding AI-generated videos." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.195, + 0.105, + 0.802, + 0.119 + ], + "angle": 0, + "content": "Table 1: Correlation between model Zero-shot answer and human reference on FingER-test" + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.133, + 0.91, + 0.251 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>Visual Quality</td><td>Temporal</td><td>Dynamic Degree</td><td>Text Alignment</td><td>Factual</td><td>Overall</td></tr>
<tr><td>Qwen2.5-VL</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td></tr>
<tr><td>Overall Level</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-/30.68/29.27</td></tr>
<tr><td>Dimension Level</td><td>-/35.06/35.54</td><td>-/16.05/17.06</td><td>-/14.81/14.09</td><td>-/33.68/32.62</td><td>-/13.86/12.28</td><td>-/52.32/61.14</td></tr>
<tr><td>Entity (w/o prob)</td><td>25.33/1.85/5.22</td><td>78.72/83.26/83.91</td><td>72.87/51.04/48.98</td><td>81.6/70.68/73.44</td><td>58.34/51.03/53.27</td><td>66.50/80.86/83.71</td></tr>
<tr><td>Entity (w/ prob)</td><td>25.33/40.60/40.94</td><td>78.72/84.51/85.44</td><td>72.87/56.48/56.85</td><td>81.6/74.09/76.49</td><td>58.34/57.45/58.67</td><td>66.50/81.23/85.26</td></tr>
<tr><td>+Reason (w/o prob)</td><td>45.71/49.97/49.61</td><td>77.65/83.12/83.89</td><td>75.21/54.30/52.87</td><td>81.08/73.24/75.31</td><td>40.51/17.43/23.55</td><td>63.96/73.40/79.15</td></tr>
<tr><td>+Reason (w/ prob)</td><td>45.71/46.29/49.64</td><td>77.65/84.60/83.89</td><td>75.21/48.88/52.80</td><td>81.08/72.38/75.35</td><td>40.51/29.27/23.50</td><td>63.96/73.29/79.18</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.104, + 0.264, + 0.892, + 0.278 + ], + "angle": 0, + "content": "Table 2: Correlation between SFT/RL model answer and human reference on FingER-test (Z-GRPO means Zero-GRPO)" + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.293, + 0.91, + 0.474 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>Visual Quality</td><td>Temporal</td><td>Dynamic Degree</td><td>Text Alignment</td><td>Factual</td><td>Overall</td></tr>
<tr><td></td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td></tr>
<tr><td>GPT-4o [13]</td><td>62.19/56.24/57.93</td><td>77.83/78.64/79.13</td><td>68.31/54.14/57.02</td><td>83.41/72.20/74.33</td><td>58.77/48.93/49.51</td><td>69.92/81.25/82.36</td></tr>
<tr><td>VideoScore [11]</td><td>-/22.80/18.55</td><td>-/23.84/26.06</td><td>-/9.49/7.18</td><td>-/19.18/13.87</td><td>-/22.93/18.31</td><td>-/20.39/17.68</td></tr>
<tr><td>Qwen2.5-VL [1]</td><td>25.33/40.60/40.94</td><td>78.72/84.51/85.44</td><td>72.87/56.48/56.85</td><td>81.6/74.09/76.49</td><td>58.34/57.45/58.67</td><td>66.50/81.23/85.26</td></tr>
<tr><td>Z-GRPO (w/o prob)</td><td>76.01/73.39/70.46</td><td>78.01/83.13/83.82</td><td>77.93/69.74/68.47</td><td>84.46/73.80/75.99</td><td>55.21/47.47/50.33</td><td>74.51/83.46/86.56</td></tr>
<tr><td>Z-GRPO (w/ prob)</td><td>76.01/71.83/71.97</td><td>78.01/81.81/83.86</td><td>77.93/67.49/68.51</td><td>84.46/74.38/76.28</td><td>55.21/42.21/50.15</td><td>74.51/83.24/86.82</td></tr>
<tr><td>FingER (w/o prob)</td><td>83.78/83.48/82.53</td><td>83.33/83.13/83.70</td><td>83.23/71.37/67.95</td><td>82.77/70.94/73.75</td><td>72.89/64.12/64.61</td><td>81.25/88.87/89.67</td></tr>
<tr><td>FingER (w/ prob)</td><td>83.78/85.31/85.22</td><td>83.33/86.24/86.99</td><td>83.23/77.07/74.73</td><td>82.77/73.85/77.98</td><td>72.89/70.99/69.26</td><td>81.25/90.23/91.41</td></tr>
<tr><td>+Reason (w/o prob)</td><td>84.05/81.51/81.00</td><td>84.04/85.88/86.63</td><td>82.49/69.22/68.22</td><td>86.79/77.87/79.77</td><td>74.03/67.47/68.41</td><td>82.33/89.79/91.64</td></tr>
<tr><td>+Reason (w/ prob)</td><td>84.05/83.85/83.87</td><td>84.04/86.51/87.09</td><td>82.49/76.11/76.70</td><td>86.79/79.34/83.16</td><td>74.03/71.70/70.27</td><td>82.33/90.31/92.04</td></tr>
<tr><td>+GRPO (w/o prob)</td><td>82.30/80.62/78.09</td><td>82.98/85.08/85.57</td><td>81.63/65.54/64.92</td><td>85.88/75.74/77.91</td><td>74.04/68.65/70.73</td><td>81.41/89.26/91.25</td></tr>
<tr><td>+GRPO (w/ prob)</td><td>82.30/83.76/83.51</td><td>82.98/86.64/87.43</td><td>81.63/75.05/74.68</td><td>85.88/78.32/82.63</td><td>74.04/71.87/72.03</td><td>81.41/90.43/92.41</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.492, + 0.482, + 0.504 + ], + "angle": 0, + "content": "Table 3: Zero-shot Evaluation Results on Public Benchmarks" + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.52, + 0.477, + 0.692 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td colspan="2">GenAI-Bench[17]</td><td colspan="2">MonetBench[37]</td></tr>
<tr><td></td><td>SRCC</td><td>PLCC</td><td>tau</td><td>diff</td></tr>
<tr><td>GPT-4o[13]</td><td>35.79</td><td>36.61</td><td>45.70</td><td>48.30</td></tr>
<tr><td>Qwen2.5-VL[1]</td><td>46.62</td><td>44.29</td><td>46.70</td><td>44.27</td></tr>
<tr><td>VideoScore[11]</td><td>42.22</td><td>40.62</td><td>49.10</td><td>54.90</td></tr>
<tr><td>VQAScore[19]</td><td>52.70</td><td>50.60</td><td>56.10</td><td>59.50</td></tr>
<tr><td>Zero-GRPO</td><td>49.58</td><td>44.39</td><td>51.30</td><td>51.34</td></tr>
<tr><td>FingER</td><td>54.13</td><td>52.60</td><td>53.90</td><td>57.31</td></tr>
<tr><td>+ Reason</td><td>56.68</td><td>57.25</td><td>57.80</td><td>62.07</td></tr>
<tr><td>+ GRPO</td><td>57.03</td><td>56.59</td><td>58.00</td><td>62.80</td></tr></table>
" + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.725, + 0.456, + 0.741 + ], + "angle": 0, + "content": "4.4 SFT and RL Performance on FingER-test" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.743, + 0.482, + 0.841 + ], + "angle": 0, + "content": "In this section, we report the performance of our reasoning model on FingER-test using different training protocols including SFT with answers, SFT with reasons, zero GRPO, and GRPO with a cold start, we also provide results using the closed-source model GPT-40 and VideScore [11] for comparisons, as detailed in Tab. 2. Note that all these results, except for VideoScore [11], are obtained by entity-level evaluations for fair comparisons." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.841, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Our model, trained with only answers, demonstrates significant performance improvements over the base model, achieving overall gains of 14.75/9.00/6.15 in Acc/SRCC/PLCC, respectively. Substantial improvements are observed in the dimensions of visual quality," + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.495, + 0.914, + 0.536 + ], + "angle": 0, + "content": "dynamic degree, and factual consistency. Note that the improvement in the text alignment dimension is limited, mainly due to its inherent capabilities derived from pre-training data." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.536, + 0.915, + 0.633 + ], + "angle": 0, + "content": "Incorporating additional reasoning during training further boosts the performance, particularly in the dimensions of text alignment, factual consistency, and temporal consistency. For the text alignment dimension, the SFT with reasoning harvests performance gains with 4.02/5.49/5.18 in Acc/SRCC/PLCC. These improvements underscore the importance of in-depth video understanding to achieve higher performance in these dimensions." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.633, + 0.915, + 0.867 + ], + "angle": 0, + "content": "We further investigate the reasoning training using RL, which includes two kinds of training procedures: (1) Zero-GRPO, and (2) GRPO initialized with a cold-start from reasoning SFT training. The results presented in Table 2 reveal that Zero-GRPO fails to predict correct answers. Upon closer examination of the training process, we identified that the issue stems from the reasoning component. Zero-GRPO generates reasons that resemble captions rather than logical reasoning. In contrast, when GRPO is applied with a cold-start initialization from our reasoning SFT model, it is able to surpass the SFT model with only 1k additional training steps. Among these dimensions, we observed steady performance improvements in the temporal and factual consistency dimensions, with boosts of \\(1.15 / 0.88 / 2.77\\) in factual consistency. We believe that the reasoning cold-start teaches the model to reason in a rough manner, while GRPO guides it towards adopting reasons with correct answers, thereby incentivizing the reasoning capability in the model." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.868, + 0.915, + 0.896 + ], + "angle": 0, + "content": "Moreover, we evaluate the performance on our proposed FingER-test dataset with closed-source MLLM [13] (1st row), and VideoScore" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.093, + 0.103, + 0.697, + 0.113 + ], + "angle": 0, + "content": "Text Prompt: The camera follows a person standing alone by the lake, gazing at the distant sunset, with their reflection mirrored on the water's surface." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.113, + 0.913, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.569, + 0.816, + 0.584 + ], + "angle": 0, + "content": "Figure 4: Qualitative results. We show several reasoning results outputted by our GRPO model." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.605, + 0.483, + 0.633 + ], + "angle": 0, + "content": "[11] (2nd row), our proposed FingER outperforms those methods with a large margin across all five assessment dimensions." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.67, + 0.418, + 0.687 + ], + "angle": 0, + "content": "4.5 Comparison on Public Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.689, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Tab. 3 demonstrates the consistent improvements achieved by our method on two public benchmarks. We compare our methods with GPT-40, Qwen2.5-VL and two other approaches. Specifically, with only Yes/No answer prediction, we already outperform all methods on GenAI-Bench, indicating the effectiveness of our fine-grained evaluation framework. Training with reasons and GRPO with a cold-start leads to further improvements with a final \\(8.21\\% / 11.83\\%\\) SRCC/PLCC relative performance boost. On MonetBench, without any weight fitting, we just average scores of five dimensions, our method is able to achieve \\(3.39\\% / 5.55\\%\\) relative improvements of tau/diff. It is worth noting that VideoScore [11] is trained using 37.6k training videos, while VQAScore [19] utilizes 665k samples, we outperform these methods with only 3.3k training videos without additional training samples from other sources, which is at most one-tenth of the training size adopted by other methods." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.604, + 0.64, + 0.617 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.622, + 0.917, + 0.858 + ], + "angle": 0, + "content": "In this paper, we emphasize the critical importance of integrating fine-grained reasoning into AI-generated video quality assessment, and we propose FingER, an entity-level fine-grained quality assessment framework with five distinct evaluation dimensions for AI-generated videos. To bridge the gap between non-AI videos and AI-generated videos, we construct a high-quality dataset, FingER-Instruct-60k, which consists of 3.3k videos generated by modern T2V models and 60k entity-level question / answering / reasoning pairs. Based on this dataset, we explore multiple training protocols to best incentivize the model's reasoning capability, including reason SFT, zero GRPO and GRPO with a reasoning cold-start. Extensive experiments demonstrate that by utilizing GRPO training with a cold-start, our method not only achieves the best performance on our dataset, but also outperforms other methods and closed-source models on two public benchmarks. 
And it is worth noting that we achieve SOTA performance with only 3.3k training samples." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.178, + 0.12 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.123, + 0.482, + 0.154 + ], + "angle": 0, + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.154, + 0.483, + 0.193 + ], + "angle": 0, + "content": "[2] Fan Bao, Chendong Xiang, Gang Yue, Guande He, Hongzhou Zhu, Kaiwen Zheng, Min Zhao, Shilong Liu, Yaole Wang, and Jun Zhu. 2024. Vudu: a highly consistent, dynamic and skilled text-to-video generator with diffusion models. arXiv preprint arXiv:2405.04233 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.194, + 0.482, + 0.243 + ], + "angle": 0, + "content": "[3] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. 2024. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 17682-17690." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.245, + 0.483, + 0.284 + ], + "angle": 0, + "content": "[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. 2024. Video generation models as world simulators. 2024. URL https://openai.com/research/video-generation-models-as-world-simulators-3 (2024), 1." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.285, + 0.482, + 0.333 + ], + "angle": 0, + "content": "[5] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 24185-24198." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.335, + 0.482, + 0.375 + ], + "angle": 0, + "content": "[6] Jaemin Cho, Yushi Hu, Jason M Baldridge, Roopal Garg, Peter Anderson, Ranjay Krishna, Mohit Bansal, Jordi Pont-Tuset, and Su Wang. 2024. Davidsonian Scene Graph: Improving Reliability in Fine-grained Evaluation for Text-to-Image Generation. In ICLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.376, + 0.482, + 0.405 + ], + "angle": 0, + "content": "[7] Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning. arXiv preprint arXiv:2504.02546 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.406, + 0.482, + 0.445 + ], + "angle": 0, + "content": "[8] Xiangxiang Chu, Limeng Qiao, Xinyu Zhang, Shuang Xu, Fei Wei, Yang Yang, Xiaofei Sun, Yiming Hu, Xinyang Lin, Bo Zhang, et al. 2024. MobilevIm v2: Faster and stronger baseline for vision language model. arXiv preprint arXiv:2402.03766 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.446, + 0.482, + 0.486 + ], + "angle": 0, + "content": "[9] Daniel Deutsch, George Foster, and Markus Freitag. 2023. Ties Matter: Meta-Evaluating Modern Metrics with Pairwise Accuracy and Tie Calibration. 
In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 12914-12929." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.486, + 0.482, + 0.526 + ], + "angle": 0, + "content": "[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.527, + 0.482, + 0.576 + ], + "angle": 0, + "content": "[11] Xuan He, Dongfu Jiang, Ge Zhang, Max Ku, Achint Soni, Sherman Siu, Haonan Chen, Abhranil Chandra, Ziyan Jiang, Aaran Arulraj, et al. 2024. VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 2105-2123." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.577, + 0.482, + 0.617 + ], + "angle": 0, + "content": "[12] Hailang Huang, Yong Wang, Zixuan Huang, Huaqiu Li, Tongwen Huang, Xi-angxiang Chu, and Richong Zhang. 2024. MMGenBench: Evaluating the Limits of LMMs from the Text-to-Image Generation Perspective. arXiv preprint arXiv:2411.14062 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.617, + 0.482, + 0.647 + ], + "angle": 0, + "content": "[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. GPT-4o System Card. arXiv preprint arXiv:2410.21276 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.647, + 0.482, + 0.687 + ], + "angle": 0, + "content": "[14] Haoning Wu Xintao Wang Yixiao Ge Xiaodong Cun David Junhao Zhang Jia-Wei Liu Yuchao Gu Rui Zhao Weisi Lin Wynne Hsu Ying Shan Jay Zhangjie Wu, Guian Fang and Mike Zheng Shou. 2024. Towards A Better Metric for Text-to-Video Generation. arXiv:2401.07781 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.688, + 0.361, + 0.697 + ], + "angle": 0, + "content": "[15] Solomon Kullback. 1951. Kullback-leibler divergence." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.698, + 0.482, + 0.727 + ], + "angle": 0, + "content": "[16] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. 2024. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.728, + 0.482, + 0.768 + ], + "angle": 0, + "content": "[17] Baiqi Li, Zhiqiu Lin, Deepak Pathak, Jiayao Emily Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. GenAI-bench: A holistic benchmark for compositional text-to-visual generation. In Synthetic Data for Computer Vision Workshop@ CVPR 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.769, + 0.482, + 0.798 + ], + "angle": 0, + "content": "[18] Mingxing Li, Rui Wang, Lei Sun, Yancheng Bai, and Xiangxiang Chu. 2025. Next Token Is Enough: Realistic Image Quality and Aesthetic Scoring with Multimodal Large Language Model. arXiv preprint arXiv:2503.06141 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.799, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[19] Zhiqiu Lin, Deepak Pathak, Baiqi Li, Jiayao Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. Evaluating text-to-visual generation with image-to-text generation. In European Conference on Computer Vision. 
Springer, 366–384." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.839, + 0.482, + 0.878 + ], + "angle": 0, + "content": "[20] Xinrang Ling, Chen Zhu, Meiqi Wu, Hangyu Li, Xiaokun Feng, Cundian Yang, Aiming Hao, Jiashu Zhu, Jiahong Wu, and Xiangxiang Chu. 2025. VMBench: A Benchmark for Perception-Aligned Video Motion Generation. arXiv preprint arXiv:2503.10076 (2025)." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.123, + 0.483, + 0.878 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.109, + 0.914, + 0.14 + ], + "angle": 0, + "content": "[21] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.141, + 0.914, + 0.17 + ], + "angle": 0, + "content": "[22] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.171, + 0.914, + 0.21 + ], + "angle": 0, + "content": "[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseemath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.211, + 0.854, + 0.221 + ], + "angle": 0, + "content": "[24] Genmo Team. 2024. Mochi 1. https://github.com/genmoai/models." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.222, + 0.914, + 0.261 + ], + "angle": 0, + "content": "[25] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.262, + 0.914, + 0.291 + ], + "angle": 0, + "content": "[26] Zachary Teed and Jia Deng, 2020. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. Springer, 402-419." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.292, + 0.914, + 0.33 + ], + "angle": 0, + "content": "[27] Zicheng Zhang Chunyi Li Haoning Wu Xiongkuo Min Guangtao Zhai Tengchuan Kou, Xiaohong Liu and Ning Liu. 2024. Subjective-aligned dataset and metric for text-to-video quality assessment. arXiv preprint arXiv:2403.11956 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.332, + 0.914, + 0.361 + ], + "angle": 0, + "content": "[28] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2016. Improved techniques for training gans. Advances in neural information processing systems, 29 (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.362, + 0.914, + 0.401 + ], + "angle": 0, + "content": "[29] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2021. Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. arXiv preprint arXiv:2104.14806 (2021)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.402, + 0.914, + 0.431 + ], + "angle": 0, + "content": "[30] Thomas Unterthiner, Sjoerd Van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. 2019. FVD: A new metric for video generation. ICLR 2019 Workshop DeepGenStruct (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.432, + 0.914, + 0.471 + ], + "angle": 0, + "content": "[31] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Galloudec. 2020. TRL: Transformer Reinforcement Learning. https://github.com/huggingface/trl." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.473, + 0.914, + 0.512 + ], + "angle": 0, + "content": "[32] Bram Wallace, Meihua Dang, Rafael Rafailov, Linqi Zhou, Aaron Lou, Senthil Purushwalkam, Stefano Ermon, Caiming Xiong, Shafiq Joty, and Nikhil Naik. 2024. Diffusion model alignment using direct preference optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8228-8238." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.513, + 0.914, + 0.542 + ], + "angle": 0, + "content": "[33] Yibin Wang, Zhiyu Tan, Junyan Wang, Xiaomeng Yang, Cheng Jin, and Hao Li. 2024. Lift: Leveraging human feedback for text-to-video model alignment. arXiv preprint arXiv:2412.04814 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.543, + 0.914, + 0.572 + ], + "angle": 0, + "content": "[34] Yibin Wang, Yuhang Zang, Hao Li, Cheng Jin, and Jiaqi Wang. 2025. Unified Reward Model for Multimodal Understanding and Generation. arXiv preprint arXiv:2503.05236 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.573, + 0.914, + 0.613 + ], + "angle": 0, + "content": "[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824-24837." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.614, + 0.914, + 0.653 + ], + "angle": 0, + "content": "[36] Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Liang Liao, Chunyi Li, Yixuan Gao, Annan Wang, Erli Zhang, Wenxiu Sun, et al. 2024. Q-Align: Teaching LMMs for Visual Scoring via Discrete Text-Defined Levels. In International Conference on Machine Learning. PMLR, 54015-54029." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.654, + 0.914, + 0.693 + ], + "angle": 0, + "content": "[37] Jiazheng Xu, Yu Huang, Jiale Cheng, Yuanming Yang, Jiajun Xu, Yuan Wang, Wenbo Duan, Shen Yang, Qunlin Jin, Shurun Li, et al. 2024. Visionreward: Fine-grained multi-dimensional human preference learning for image and video generation. arXiv preprint arXiv:2412.21059 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.694, + 0.914, + 0.734 + ], + "angle": 0, + "content": "[38] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.735, + 0.914, + 0.773 + ], + "angle": 0, + "content": "[39] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. 
Advances in neural information processing systems 36 (2023), 11809-11822." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.775, + 0.914, + 0.804 + ], + "angle": 0, + "content": "[40] Xuebo Liu XintaoWang Yong Zhang Haoxin Chen Yang Liu Tieyong Zeng Raymond Chan Yaofang Liu, Xiaodong Cun and Ying Shan. 2024. Evalcrafter: Benchmarking and evaluating large video generation models. (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.805, + 0.914, + 0.824 + ], + "angle": 0, + "content": "[41] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is More for Reasoning. arXiv preprint arXiv:2502.03387 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.826, + 0.914, + 0.854 + ], + "angle": 0, + "content": "[42] Ailing Zeng, Yuhang Yang, Weidong Chen, and Wei Liu. 2024. The Dawn of Video Generation: Preliminary Explorations with SORA-like Models. arXiv preprint arXiv:2410.05227 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.855, + 0.914, + 0.885 + ], + "angle": 0, + "content": "[43] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.109, + 0.914, + 0.885 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.109, + 0.482, + 0.17 + ], + "angle": 0, + "content": "[44] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. LlamaFactory: Unified Efficient Fine-Tuning of \\(100+\\) Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations). Association for Computational Linguistics, Bangkok, Thailand. http://arxiv.org/abs/2403.13372" + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.17, + 0.482, + 0.192 + ], + "angle": 0, + "content": "[45] Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. 2024. Open-sora: Democratizing" + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.109, + 0.482, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.109, + 0.88, + 0.12 + ], + "angle": 0, + "content": "efficient video production for all. arXiv preprint arXiv:2412.20404 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.12, + 0.913, + 0.15 + ], + "angle": 0, + "content": "[46] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. 2025. R1-Zero's \"Aha Moment\" in Visual Reasoning on a 2B Non-SFT Model. arXiv preprint arXiv:2503.05132 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.15, + 0.913, + 0.192 + ], + "angle": 0, + "content": "[47] Jiashuo Yu Fan Zhang Chenyang Si Yuming Jiang Yuanhan Zhang Tianxing Wu Qingyang Jin Nattapol Chanpaisit Yaohui Wang Xinyuan Chen Limin Wang Dahua Lin Yu Qiao Ziqi Huang, Yinan He and Ziwei Liu. 2023. Vbench: Comprehensive benchmark suite for video generative models. (2023)." 
+ }, + { + "type": "list", + "bbox": [ + 0.518, + 0.109, + 0.913, + 0.192 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_origin.pdf b/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1255af80d632262530bdb389fff7c4d4c5746be0 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/19c9a29c-ccbf-4591-9305-89a160f95b8c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ab6b1783afa74bd77fde78291555dae14254166603b37c94eddf3e648c814f1 +size 3077354 diff --git a/data/2025/2504_10xxx/2504.10358/full.md b/data/2025/2504_10xxx/2504.10358/full.md new file mode 100644 index 0000000000000000000000000000000000000000..265ec39e9b402d1e4c396e979bdeea271ec1ef72 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/full.md @@ -0,0 +1,340 @@ +# FingER: Content Aware Fine-grained Evaluation with Reasoning for AI-Generated Videos + +Rui Chen + +chenrui.chen@alibaba-inc.com + +AMAP, Alibaba Group + +Beijing, China + +Lei Sun + +ally.sl@alibaba-inc.com + +AMAP, Alibaba Group + +Beijing, China + +Jing Tang + +guangyu.tj@alibaba-inc.com + +AMAP, Alibaba Group + +Beijing, China + +Geng Li + +xiaofeng/lg@alibaba-inc.com + +AMAP, Alibaba Group + +Beijing, China + +Xiangxiang Chu + +chuxiangxiang.cxx@alibaba-inc.com + +AMAP, Alibaba Group + +Beijing, China + +# Abstract + +Recent advances in video generation have posed great challenges in the assessment of AI-generated content, particularly with the emergence of increasingly sophisticated models. The various inconsistencies and defects observed in such videos are inherently complex, making overall scoring notoriously difficult. In this paper, we emphasize the critical importance of integrating fine-grained reasoning into video evaluation, and we propose FingER, a novel entity-level reasoning evaluation framework that first automatically generates Fine-grained Entity-level questions, and then answers those questions by a Reasoning model with scores, which can be subsequently weighted summed to an overall score for different applications. Specifically, we leverage LLMs to derive entity-level questions across five distinct perspectives, which (i) often focus on some specific entities of the content, thereby making answering or scoring much easier by MLLMs, and (ii) are more interpretable. Then we construct a FingER dataset, consisting of approximately 3.3k videos and corresponding 60k fine-grained QA annotations, each with detailed reasons. Based on that, we further investigate various training protocols to best incentivize the reasoning capability of MLLMs for correct answer prediction. Extensive experiments demonstrate that a reasoning model trained using Group Relative Policy Optimization (GRPO) with a cold-start strategy achieves the best performance. Notably, our model surpasses existing methods by a relative margin of $11.8\%$ on GenAI-Bench and $5.5\%$ on Monet-Bench with only 3.3k training videos, which is at most one-tenth of the training samples utilized by other methods. Our code and dataset will be released soon. + +# 1 Introduction + +Recent advancements in Text-to-Video (T2V) generative models [2, 4, 45] have demonstrated significant progress in producing visually appealing and content-rich videos. 
For instance, post-Sora models such as Kling have shown the ability to generate high-resolution videos that closely adhere to textual prompts. However, these models often produce localized artifacts, inconsistencies, and violations of physical laws. These issues highlight the necessity for the development of robust and reliable quality assessment methods for AI-generated video content. + +Early research on evaluating AI-generated videos has primarily relied on feature-based metrics, such as the Frechet Video Distance (FVD) [30] and optical flow-based methods like RAFT [26]. While + +![](images/7d6921f0086a2ed04a58acfd7dec5e0ba42ca4b1e6fb8d80da3e0245f2a73809.jpg) +(a) + +![](images/9e3526d378290d0e5f1f5b7280de9f4ac5ef4c8a53b37682de9d2eef40d2c009.jpg) +(b) +(c) +(d) +Figure 1: Advanced generation models often exhibit localized defects while maintaining overall visually appealing, as illustrated in (a), which requires fine-grained in-depth understanding. (b) and (c) show that even with detailed instructional prompts and entity-level questions, GPT-4o still fails to identify this hand deformation. (d) shows the effectiveness of our work by integrating reasoning model with fine-grained entity-level questions. + +these methods effectively assess overall visual quality and dynamic characteristics, they fall short in capturing nuanced aspects that require deeper semantic understanding and fine-grained reasoning. To address these limitations, recent studies have introduced MLLMs for more comprehensive evaluations. For example, VideoScore [11] proposes a framework that evaluates five distinct aspects of video quality using an MLLM to assign scores ranging from 1 to 4. VisionReward [37] aligns video generation with human perception by formulating predefined judgment questions and fine-tuning a video-based MLLM to compute weighted scores. Similarly, LiFT [33] + +learns a reward model that provides reasons and scores across multiple aspects to align the generation model with human preferences. Despite these advancements, two key challenges persist: + +(i) Inadequacy of Fine-grained Video Reasoning: Although advanced generative models have significantly improved global visual quality by reducing issues such as blurriness and flickering, they still exhibit localized spatiotemporal inconsistencies, distortions, unnatural artifacts, and violations of physical laws, especially in scenarios involving complex motion or multiple entities. For instance, Fig 1(a) shows a video generated by Pixverse that, despite its high overall visual appeal, contains a noticeably deformed hand in a localized area. This example underscores the need for more fine-grained and context-aware reasoning capabilities in video understanding, moving beyond superficial visual pattern recognition to incorporate temporally grounded and semantically rich analysis. (ii) Domain Gap in AI-Generated Videos: Current state-of-the-art MLLMs struggle to capture the intrinsic characteristics of AI-generated videos, even with well-defined prompts. As illustrated in Fig 1(b) and (c), GPT-4o misidentifies the deformed hand in a video and assigns a high score based on misleading explanations. This issue is primarily attributed to a domain gap between the training data used by MLLMs and the unique features of AI-generated videos. In essence, AI-generated videos can deceive MLLMs in certain latent feature spaces. Bridging this gap requires a high-quality dataset of AI-generated videos. 
Moreover, developing strategies to enhance the generalization of MLLMs to AI-generated videos remains an open challenge. + +Inspired by the Question Generation and Answering (QG/A) framework [6] and recent reasoning works [7, 21, 22, 46] that demonstrate a significant self-emergence of complex cognitive reasoning abilities induced by Deepseek R1 [10], we argue that incorporating fine-grained reasoning abilities would significantly enhance the video quality assessment. In this paper, we propose FingER, a novel framework that first decomposes the overall evaluation into fine-grained entity-level questions and then answers these questions with corresponding scores by a reasoning model, which is fine-tuned on our high-quality dataset using GRPO with a cold-start initialization. Specifically, we employ five distinct aspects as defined in VideoScore [11], including text-to-video alignment, temporal consistency, factual consistency, dynamic degree, and visual quality. By deriving such fine-grained entity-level questions, our framework not only enables the model to explicitly focus on specific characteristics of certain entities, thereby facilitating a more fine-grained understanding, but also enhances interpretability through these structured QA pairs. + +Based on these questions, we prompted several strong MLLMs [13, 25] to provide answers. However, we observed that these models struggle to provide correct answers, particularly in aspects like factual consistency. As stated before, we attribute this to the lack of high-quality AI-generated video datasets and the inadequate reasoning capabilities of current models. Therefore, we curated a fine-grained AI-generated video reasoning dataset, FingER-Instruct-60k, which consists of $3.3\mathrm{k}$ AI-generated videos sourced from advanced generation models like Kling, Luma, Vidu, PixVerse, CogVideoX [38], etc. For each video, we generate fine-grained questions and annotate them with 'Yes/No'. To ease human labor and + +also reduce potential errors, we leverage MLLMs to generate detailed reasoning explanations given each question and its answer. (Note that, while MLLMs often struggle to answer these questions correctly, they demonstrate higher possibilities of producing coherent reasoning when the answer is explicitly provided, suggesting the presence of underlying reasoning capabilities.) These generated reasons were subsequently re-checked and refined by human annotators to ensure accuracy and quality. At last, we collect 60k fine-grained QA annotations with high-quality detailed reasons. + +To enhance the video reasoning capabilities, we choose Qwen2.5-VL [1], and explore multiple training protocols on our dataset, including directly training with answers, training with reasons, zero GRPO training and GRPO training with a cold-start initialization. Our experiments reveal that integrating high-quality reasons can largely increase the performance along with the interpretability, and GRPO with cold-start can further enhance its performance, especially in dimensions that require in-depth understanding. We also test our reasoning model in a zero-shot manner on public benchmarks, and still consistently achieve state-of-the-art performance. + +In summary, we propose an entity-level quality assessment framework with strong reasoning and generalization capabilities. To the best of our knowledge, our work is the first to introduce entity-level reasoning into the quality assessment of AI-generated videos. 
Our contributions can be summarized as follows:

- **Novel Evaluation Approach.** We propose a novel evaluation approach, FingER, designed for practical AI-generated video quality assessment. It comprises an entity-level question generation module and a video reasoning model that provides corresponding scores. By emphasizing fine-grained reasoning, our approach effectively addresses localized defects in AI-generated videos that require in-depth understanding and significantly enhances interpretability.
- **Fine-grained Reasoning Dataset.** We present a new dataset for AI-generated video reasoning, containing 3.3k videos and 60k entity-level QA annotations sourced from advanced generation models. Each QA pair is annotated with detailed reasons. This dataset aims to further advance research in this field.
- **Enhanced Training Protocols.** We explore multiple training protocols to enhance the fine-grained video reasoning capabilities of MLLMs. Notably, we are the first to introduce GRPO training into AI-generated video quality assessment, which proves to be highly effective in improving both reasoning and generalization abilities.
- **Strong Performance.** Extensive experiments demonstrate the effectiveness of our approach. We achieve state-of-the-art performance on public benchmarks using only one-tenth of the training videos, thereby highlighting the superior generalization capability of our model.

# 2 Related Work

# 2.1 Video Quality Assessment

Early approaches relied on feature-based metrics, such as Fréchet Video Distance (FVD) [30], Inception Score (IS) [28], and CLIPSim [29]. Benchmark works like EvalCrafter [40] and VBench [47] introduced comprehensive evaluation frameworks with 18 and 16 metrics, respectively. However, these methods fall short in assessing deep semantic understanding or aligning with human perception.

![](images/7e79c5f5413e855405378af10caaf9c9af8f05fc58ac361b7acede64c70a4a2e.jpg)
Figure 2: The overview of our proposed FingER framework, including (a) the evaluation pipeline, (b) FingER-Instruct-60k dataset curation, and (c) GRPO training of our reasoning model.

![](images/af4c5252e3a743d7a1ba6d4c688efce7265a4c7b40a1e0158ee15f39758821a2.jpg)

With the rapid advancement of MLLMs [1, 5, 8, 25], a growing number of studies have explored leveraging their capabilities to facilitate image/video quality evaluation [6, 12, 18, 19, 36]. Inspired by DSG [6], which uses question generation/answering (QG/A) for interpretable assessment, T2VScore [14] adopted a QA framework for T2V alignment. T2VQA [27] introduced the T2VQA-DB dataset, comprising 10k videos annotated with Mean Opinion Scores (MOS), and trained a transformer-based model to predict these scores. Similarly, VideoScore [11] proposed a larger dataset across five dimensions and employed an MLLM for scoring. VMBench [20] introduced perception-aligned motion metrics to evaluate motion quality. While these methods predict scores or labels, they often overlook the reasoning behind assessments, limiting their effectiveness. Our work distinguishes itself by incorporating entity-level reasoning for evaluating advanced generation models reliably.

Another line of research focuses on reward models for improving generative models via Reinforcement Learning from Human Feedback (RLHF), such as Diffusion-DPO [32], VisionReward [37] and UnifiedReward [34].
While these efforts target generative model optimization, our work emphasizes practical video quality evaluation, we expect it is able to further benefit the generation models using RLHF in future work. + +# 2.2 Reasoning Inference in Large Models + +Reasoning inference aims to emulate human-like thinking processes by forming the final answer through a Large Language Model (LLM). Specifically, to answer a given question, an LLM is required to think divergently and record the thinking processes, which are subsequently referenced when formulating the final answer. This approach has inspired a variety of research, including prompting-based Chain-of-Thought (CoT) [35], planning-based Graph-of-Thought [3] and Tree-of-Thought [39] processing, reward methods [16], and supervised fine-tuning (SFT) datasets with sufficient context [41]. Notably, DeepSeek-R1 [10] integrates specific prompts with reinforcement learning (RL), enabling the model to first generate the thinking process before producing the final answer. This method allows for supervised fine-tuning with a small amount of annotated data containing thinking processes, followed by reinforcement learning fine-tuning on more data without thinking processes. A very recent approach [7] proposes a highly simplified reinforcement learning framework and demonstrates its validity across several benchmarks. + +# 3 Method + +In this section, we first introduce our entity-level video quality assessment framework - FingER in Sec. 3.1. Then, we detail the data curation pipeline of our proposed dataset, namely FingER-Instruct-60k in Sec. 3.2. In the end, we combine multiple training methods with our proposed instruction tuning dataset, from the + +basic supervised fine-tuning (SFT), to reasoning training with reinforcement learning (RL), as detailed in Sec. 3.3. + +# 3.1 Entity-level VQA Framework + +For Text-to-Video (T2V) generation task, user input prompt is the only key instruction for generative models to understand and generate content that well-aligned with user's intent. To perform entity-level quality assessment of AI-generated videos, we start from understanding the user's prompt through extracting entities, attributes, and actions within itself. Inspired by DSG [6] in Text-to-Image (T2I) evaluation, we also utilize closed-source Large-Language-Model (LLM) to perform textual understanding and the following entity extraction. As shown in Fig. 4, we provide abundant in-context learning (ICL) [35] examples from different video generation scenarios and formulate the final input for GPT-4o [13], in which way we can harvest more steady entity extraction results. + +With entities extracted from the user's prompt, we generate entity-level questions from five distinct video quality assessment dimensions, including visual quality, text-to-video alignment, temporal consistency, factual consistency, and dynamic degree. For each dimension, we provide a detailed explanation followed by several key points, formulating the context information when prompting the LLM. We also prepare adequate entity-level in-context learning examples, which are summarized from videos with and without obvious artifacts or hallucinations. In this way, we can help the LLMs to better understand which question should be asked when coping with a specific entity along with the given assessment dimension. In short, we break down the granularity of fine-grained video quality assessment from multi-dimensional level to entity-level. 
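As a concrete illustration of this two-stage breakdown, the sketch below first asks an LLM to extract entities from the user prompt and then asks it for Yes/No questions per entity and dimension. It is only a schematic under assumed interfaces: `call_llm` is a hypothetical stand-in for the closed-source LLM call (the paper uses GPT-4o), and the prompt strings are abbreviated placeholders rather than the authors' actual templates.

```python
# Sketch of the two-stage entity-level question generation described above.
# `call_llm` is a hypothetical stand-in for a chat-completion API; the prompt
# strings are abbreviated placeholders, not the authors' real templates.
import json
from typing import Callable, Dict, List

DIMENSIONS = [
    "visual quality",
    "text-to-video alignment",
    "temporal consistency",
    "factual consistency",
    "dynamic degree",
]

def extract_entities(user_prompt: str, call_llm: Callable[[str], str]) -> List[str]:
    """Ask the LLM to list the entities (with attributes/actions) in the T2V prompt."""
    request = (
        "List the entities, their attributes, and their actions mentioned in the "
        f"following text-to-video prompt as a JSON array of strings.\n\nPrompt: {user_prompt}"
    )
    return json.loads(call_llm(request))

def generate_entity_questions(
    user_prompt: str, entities: List[str], call_llm: Callable[[str], str]
) -> Dict[str, Dict[str, List[str]]]:
    """For each entity and each assessment dimension, ask the LLM for Yes/No questions."""
    questions: Dict[str, Dict[str, List[str]]] = {}
    for entity in entities:
        questions[entity] = {}
        for dim in DIMENSIONS:
            request = (
                f"Assessment dimension: {dim}.\n"
                f"Video prompt: {user_prompt}\n"
                f"Entity: {entity}\n"
                "Write binary (Yes/No) questions that check this entity along the "
                "given dimension. Return a JSON array of strings."
            )
            questions[entity][dim] = json.loads(call_llm(request))
    return questions
```

In the actual pipeline, the dimension explanations, key points, and in-context learning examples described above would be folded into these prompts, and the generated questions would be extracted with pattern matching rather than strict JSON parsing.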
The intuition behind entity-level question generation is that fine-grained question answering can guide the MLLM to focus on the correlation between an entity-level textual description and its corresponding visual appearance in the video content.

After the entity-level question generation procedure, our fine-tuned MLLM answers the above questions with a simple "Yes" or "No", along with a detailed reasoning process explaining why that answer was given. Learning the logical reasoning process is critical for model performance improvements, as detailed in the experiments in Sec. 4.4. The output reason is also useful when conducting practical video quality assessment, making the result more interpretable and user-friendly. To formulate a final score representing the overall quality of AI-generated videos, we start by calculating the probability of the answer token ("Yes" or "No") for each entity-level question to represent the entity-level score. Since there are multiple "Yes" and "No" tokens with different surface forms but similar meanings in the vocabulary of our MLLM, we first gather the token sets for "Yes" and "No". In this paper, we take ["Yes", "yes", "YES", "Yes", "Yes"] as the token set for the answer "Yes", and ["No", "no", "NO", "No", "No"] for the answer "No", denoted by $T_{Y}$ and $T_{N}$, respectively. From the logits at the answer token, we extract every logit whose token id falls within these token sets and apply a softmax over $T_{Y} \cup T_{N}$, as illustrated in Eq. 1. Then, given the entity-level question $q$, we obtain the answer probabilities $P(No \mid q)$ and $P(Yes \mid q)$ with a simple sum.

$$
P(No \mid q) = \sum_{i=1}^{n} \mathrm{Softmax}(x_i), \quad x_i \in T_N; \tag{1}
$$

$$
P(Yes \mid q) = \sum_{j=1}^{m} \mathrm{Softmax}(y_j), \quad y_j \in T_Y.
$$

Instead of directly using the derived probability as the entity-level score, we still need a judgment on whether the question is positive or negative. For example, given the question "Do the attributes of the table in the video (such as size, shape, and material) align with real-world characteristics?" from the factual consistency dimension, it is apparent that the factual consistency of the assessed video goes up with a positive "Yes" answer. We define this type of question as a positive one, and vice versa. We denote the status of an entity-level question by $q_{stat}$: if $q_{stat}$ equals 1, the question is positive; otherwise, the question is negative.

$$
S_{entity} =
\begin{cases}
P(No \mid q), & \text{if } q_{stat} = 0; \\
P(Yes \mid q), & \text{if } q_{stat} = 1.
\end{cases} \tag{2}
$$

With the aforementioned preparations in place, we propose our entity-level score $S_{entity}$, which correlates positively with the quality of the assessed video. When the entity-level question is positive, we use the probability of the "Yes" answer $P(Yes \mid q)$ to represent the score it can gain, and we use the probability of the "No" answer $P(No \mid q)$ if the question is negative, as illustrated in Eq. 2. In short, the intuition behind this design is that whichever answer the video quality increases with, we compute the entity-level score from that answer's probability.
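To make Eqs. 1 and 2 concrete, a minimal PyTorch-style sketch of the entity-level scoring is given below. It assumes the logits at the answer token position and the vocabulary ids of the "Yes"/"No" token variants ($T_Y$, $T_N$) are already available; the function and argument names (`entity_score`, `yes_ids`, `no_ids`) are illustrative, not from the paper.

```python
# Minimal sketch of the entity-level scoring in Eqs. (1)-(2). Assumes the logits
# at the answer position and the vocabulary ids for the "Yes"/"No" variants
# (T_Y and T_N) are already known; all names here are illustrative.
from typing import List

import torch

def entity_score(answer_logits: torch.Tensor,
                 yes_ids: List[int],
                 no_ids: List[int],
                 q_stat: int) -> float:
    """Return S_entity for one entity-level question.

    answer_logits: tensor of shape [vocab_size], logits at the answer token position.
    q_stat: 1 for a positive question, 0 for a negative one.
    """
    ids = yes_ids + no_ids
    # Softmax restricted to the union T_Y ∪ T_N (Eq. 1).
    probs = torch.softmax(answer_logits[ids], dim=-1)
    p_yes = probs[: len(yes_ids)].sum().item()
    p_no = probs[len(yes_ids):].sum().item()
    # Take the probability of whichever answer indicates higher quality (Eq. 2).
    return p_yes if q_stat == 1 else p_no
```

The dimension-level and overall-level scores described next then follow by simply summing (and weighting) these per-question values.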
Then, we use the entity-level question/answer pairs that fall under the same quality assessment dimension to formulate our dimension-level score $S_{dim}$. To be specific, we simply take the linear summation of the entity-level scores $S_{entity}$, as illustrated in Eq. 3.

$$
S_{dim} = \sum_{i=1}^{N} S_{entity}^{i} \tag{3}
$$

In the end, we derive the overall-level score $S_{overall}$ as the weighted sum of the five distinct dimension scores $S_{dim}$ in Eq. 4.

$$
S_{overall} = \sum_{i=1}^{5} w_i \cdot S_{dim}^{i} \tag{4}
$$

In short, we propose the entity-level VQA framework FingER, which consists of three parts: (i) entity-level question generation, (ii) the fine-tuned MLLM with reasoning output, and (iii) the hierarchical scoring function that converts token probabilities into multi-level scores.

# 3.2 Entity-level Dataset with Reasoning

In this section, we introduce the construction pipeline of our entity-level instruction tuning dataset, named FingER-Instruct-60k.

3.2.1 Prompt and T2V Model Selection. Based on the VideoGenEval [42] dataset, our instruction tuning dataset is composed of 420 diverse text prompts and 3.3k AI-generated videos produced by 8 modern T2V models, including the closed-source models Kling, Luma, PixVerse, Vidu, and Qingying, and the open-source models Mochi-1 [24], CogVideoX [38], and Open-Sora [45]. We utilize all 420 text prompts from the T2V session [45], which cover a diverse range of complex scenarios, including human-centric activities, material and spatial relationships, as well as animal and text generation. These prompts are derived from real-life user inputs. As for the T2V model selection, we regard models that understand and obey most common-sense and physical laws, and that generate time-consistent videos without obvious temporal distortions, as high-quality models. We select the generative models uniformly based solely on the quality of their generated videos, spanning from high-quality models to average-quality models, for a more diverse training data distribution.

3.2.2 Entity-level Question Generation and Annotation. Our multi-dimensional entity-level question generation starts with understanding users' input prompts and extracting the entities within them. We use GPT-4o [13] for prompt understanding and entity extraction, with abundant in-context learning examples provided. Then, we perform entity-level question generation for our five distinct assessment dimensions. For each entity, we prompt the LLM with a task introduction, the assessment dimension explanation with several key points to focus on, the user's input prompt, the extracted entity, and, most importantly, the in-context learning examples. We extract the generated questions with regular expression matching.

For data annotation, we engaged 10 professional annotators to complete the task of annotating 60k question/answer pairs. Inter-annotator agreement was ensured through multiple rounds of small-scale pilot annotations, and the entire process took approximately one month to complete.

3.2.3 Reasoning Generation and Verification. We employ a powerful closed-source MLLM [25] to generate the initial version of the reasoning process. Specifically, we prompt the MLLM with the assessment dimension explanation, the user prompt, in-context learning examples, and the entity-level question along with its human-annotated result.
An interesting finding is that when the MLLM is provided with the correct answer to the entity-level question, the generated reasoning process explaining that answer is more reasonable than when the MLLM directly generates both the answer and its reason. Rather than using the MLLM-generated reasoning process directly, we conduct thorough human verification to ensure the quality of our reasoning training data.

With the aforementioned entity-level questions, human-annotated answers, and detailed reasoning processes, we formulate our instruction tuning dataset FingER-Instruct-60k, which serves as the basis for the model training in the next section.

# 3.3 Instruction Tuning and GRPO Training

We use Qwen2.5-VL-7B-Instruct [1] as our base model and apply supervised fine-tuning (SFT), SFT with reasoning, and reinforcement learning to it.

3.3.1 Supervised Fine-Tuning. We directly train the base model on FingER-Instruct-60k; the model's response contains only the "Yes" or "No" answer, following the next-token prediction paradigm. This means the model only needs to learn to predict the correct answer without any reasoning process. The loss function is the Cross-Entropy loss:

$$
\mathcal{L}_{CE} = -\sum_{i=1}^{N} y_i \log\left(p_i\right) \tag{5}
$$

3.3.2 Supervised Fine-Tuning with Reasoning. We also train the base model on FingER-Instruct-60k, but the difference from plain supervised fine-tuning is that the model needs to learn to predict the correct answer within an `<answer>...</answer>` tag and its reasoning process within a `<reason>...</reason>` tag. We apply prompt engineering to the input tokens to realize this difference. The loss therefore covers both the reasoning process and the answer.

3.3.3 GRPO Training. We employ GRPO [23] to enhance reasoning inference performance, exploring two protocols: (i) Zero-GRPO, which relies solely on reinforcement learning without initial supervised data; and (ii) GRPO with cold-start Supervised Fine-Tuning, which combines initial supervised learning with subsequent reinforcement optimization.

Zero-GRPO. Zero-GRPO is an exploratory attempt that is initialized directly from Qwen-2.5-VL [1] and uses RL to implicitly improve reasoning abilities without annotated reasons. For each video-question pair, we first sample a group of outputs $\{o_1,o_2,\dots,o_G\}$ from the old policy $\pi_{\theta_{old}}(o_i \mid v, q)$, where $v$ denotes the video that needs to be evaluated and $q$ denotes the question for each entity and dimension. We then update the policy model $\pi_{\theta}$ by minimizing the following loss.

$$
\begin{aligned}
\mathcal{L}_{GRPO}(\theta) = {} & -\mathbb{E}_{q \sim P(Q),\, \{o_i\}_{i=1}^{G} \sim \pi_{\theta_{old}}(O \mid v, q)}\;
\frac{1}{G} \sum_{i=1}^{G} \Bigg( \min \Bigg( \frac{\pi_{\theta}(o_i \mid v, q)}{\pi_{\theta_{old}}(o_i \mid v, q)} \cdot Adv_i, \\
& \operatorname{clip}\!\left(\frac{\pi_{\theta}(o_i \mid v, q)}{\pi_{\theta_{old}}(o_i \mid v, q)},\, 1-\epsilon,\, 1+\epsilon\right) \cdot Adv_i \Bigg) - \beta\, \mathbb{D}_{KL}\!\left(\pi_{\theta} \,\|\, \pi_{ref}\right) \Bigg)
\end{aligned} \tag{6}
$$

$$
\mathbb{D}_{KL}\!\left(\pi_{\theta} \,\|\, \pi_{ref}\right) = \frac{\pi_{ref}(o_i \mid v, q)}{\pi_{\theta}(o_i \mid v, q)} - \log \frac{\pi_{ref}(o_i \mid v, q)}{\pi_{\theta}(o_i \mid v, q)} - 1 \tag{7}
$$

Here $\beta$ denotes the coefficient of the Kullback-Leibler divergence [15] between the policy model and the reference (base) model, and $\epsilon$ denotes the clipping threshold. $Adv_i$ is the advantage, obtained by normalizing the group of rewards $\{r_1,r_2,\dots,r_G\}$ computed from the outputs within each group:

$$
Adv_i = \frac{r_i - \operatorname{Mean}\{r_1, r_2, \dots, r_G\}}{\operatorname{Std}\{r_1, r_2, \dots, r_G\}} \tag{8}
$$

$r_i$ is composed of two reward functions:

$$
r_i = r_{accuracy_i} + r_{format_i} \tag{9}
$$

$$
r_{accuracy_i} =
\begin{cases}
1.0 & \text{if } answer_i = GT_i \\
0.0 & \text{otherwise}
\end{cases} \tag{10}
$$

$$
r_{format_i} =
\begin{cases}
1.0 & \text{if } o_i \text{ includes the correct format} \\
0.0 & \text{otherwise}
\end{cases} \tag{11}
$$

A correct format means the output $o_i$ contains the two tags `<answer>...</answer>` and `<reason>...</reason>`: the "Yes" or "No" token appears only within the answer tag, and the reasoning process appears only within the reason tag.

GRPO with cold-start Supervised Fine-Tuning. DeepSeek-R1 demonstrated that fine-tuning on an annotated dataset with reasoning processes before applying reinforcement learning (RL) yields better performance than directly using RL [10]. We adopt this approach with our supervised fine-tuning model. The sole difference between Zero-GRPO and GRPO with cold-start Supervised Fine-Tuning lies in the base model: the latter is initialized from a model pre-trained on annotated data containing reasoning processes.

# 4 Experiments

# 4.1 Datasets and Evaluation Metrics

4.1.1 Datasets. We split off 185 generated videos (around 5% of the whole data) with 3.5k entity-level questions from the 5 distinct quality assessment dimensions to formulate our FingER-test dataset. Regarding the public benchmarks, we adopt the popular GenAI-Bench [17] and the recently released MonetBench [37] for performance evaluation. GenAI-Bench contains 800 unique text prompts paired with 4 T2V models, and each generated video has MOS (Mean Opinion Scores) annotated by 3 annotators. MonetBench consists of 1000 different text prompts, each paired with 2 T2V models. Each pair of videos is generated with the same prompt but different video generation models. MonetBench annotates each video pair with human preferences, including "win", "lose", and "tie" options.

4.1.2 Evaluation Metrics. We report the accuracy (Acc) of "Yes" or "No" answers, the Pearson linear correlation coefficient (PLCC), and the Spearman rank correlation coefficient (SRCC) on our proposed FingER-test dataset. We evaluate our models with and without the token probability calculation, denoted by (w/o prob) and (w/ prob) in Tab. 1 and Tab. 2. Following previous works [11, 19], we utilize the SRCC and the PLCC for evaluating the model's performance on GenAI-Bench.
And we use pairwise accuracy as the metrics for human preference evaluation on MonetBench and report tau and diff, followed [9, 43]. + +# 4.2 Implementation Details + +Based on Qwen-2.5-VL-7B [1], we fine-tune our model with the following experiment settings: learning rate of $5.0\mathrm{e - 6}$ , global batch size of 32, video input fps (frame-per-second) is set to 2, and video maximum input resolution is set to $448\times 448$ pixels. We utilize LLaMA-Factory [44] as our supervised fine-tuning (SFT) codebase. We perform SFT on our proposed FingER-Instruct-60k dataset for 2 epochs with 8 NVIDIA H20 GPUs, and the training steps are the same for the model trained with extra reasoning process. As for the settings of our reinforcement learning (RL) experiments, we employ Huggingface-TRL [31] as our RL fine-tuning tool with following hyper-parameters to implement GRPO: $\beta = 0.04$ , and the number of group $G = 16$ , $\epsilon = 0.2$ , $\mu = 1$ , the initial learning rate of RL is $5.0\mathrm{e - 7}$ . We train Zero-GRPO and GRPO with cold-start for 2k steps on 4 NVIDIA H20 GPUs. + +# 4.3 Zero-shot Performance on FingER-test + +We report the zero-shot performance of Qwen2.5-VL across five dimensions on our dataset. Through ablations on resolution, frame + +![](images/f6db93766cb5f466189876315472945b0af1547224588b7f6baaa83e5488d4b6.jpg) +Figure 3: Zero-shot performance on five distinct assessment dimensions with different input resolution and fps. + +rate (fps), and evaluation granularity, we reveal the capabilities of the base model to handle different dimensions, and further demonstrate the crucial importance of integrating entity-level evaluation. + +Increasing resolution and fps leads to slight improvements. Fig. 3 illustrates the accuracy across five dimensions when prompted with entity-level questions. We can see that the accuracy curves show slight improvements with increasing resolutions or frame rates (fps), albeit at a significant computational cost. These results suggest that resolution and fps are not the primary factors of performance enhancement. Consequently, for efficiency we adopt $448 \times 448$ pixels and 2 fps as the default settings for subsequent zero-shot and supervised fine-tuning (SFT) experiments. + +Performance varies significantly across different dimensions. As shown in Fig. 3, the zero-shot accuracy for visual quality is exceptionally low at $26.1\%$ , while factual consistency achieves $57.6\%$ . In contrast, dimensions like text alignment show higher accuracy at $80.59\%$ , likely due to the base model's inherent capabilities from pre-training on caption data. We believe that the notably low accuracy in visual quality is primarily attributed to misalignment from AI-generated videos, and the main challenges still lie in dimensions requiring in-depth reasoning, such as factual consistency, temporal consistency, and text alignment, which will be further demonstrated in the following section. + +Integrating entity-level evaluations brings a substantial performance gain. To validate the efficacy of our entity-level QA framework, we conduct experiments across three evaluation granularities: overall level, dimension level, and our proposed entity level, as detailed in Tab. 1. The overall level (1st row) prompts the model with an overall assessment rating from 1 to 4, accompanied by detailed evaluation criteria, while the dimension level (2nd row) prompts model to rate each dimension from 1 to 4, which are then averaged to get a final score. 
The results of our proposed entity level (3rd and 4th rows) are reported with and without the probability calculation strategy introduced in Sec. 3.1, and furthermore, we instruct the model to provide explanatory reasoning along with its answers (last two rows). Compared to the entity-level framework, both the overall and dimension levels exhibit substantial performance degradation across all dimensions, indicating that fine-grained evaluation substantially enhances the model's performance. It is worth noting that incorporating explanatory reasoning does not bring improvements, revealing the inherent limitations of the base model in understanding AI-generated videos.

Table 1: Correlation between model Zero-shot answer and human reference on FingER-test

| Method | Visual Quality | Temporal | Dynamic Degree | Text Alignment | Factual | Overall |
| --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-VL | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC |
| Overall Level | - | - | - | - | - | -/30.68/29.27 |
| Dimension Level | -/35.06/35.54 | -/16.05/17.06 | -/14.81/14.09 | -/33.68/32.62 | -/13.86/12.28 | -/52.32/61.14 |
| Entity (w/o prob) | 25.33/1.85/5.22 | 78.72/83.26/83.91 | 72.87/51.04/48.98 | 81.6/70.68/73.44 | 58.34/51.03/53.27 | 66.50/80.86/83.71 |
| Entity (w/ prob) | 25.33/40.60/40.94 | 78.72/84.51/85.44 | 72.87/56.48/56.85 | 81.6/74.09/76.49 | 58.34/57.45/58.67 | 66.50/81.23/85.26 |
| +Reason (w/o prob) | 45.71/49.97/49.61 | 77.65/83.12/83.89 | 75.21/54.30/52.87 | 81.08/73.24/75.31 | 40.51/17.43/23.55 | 63.96/73.40/79.15 |
| +Reason (w/ prob) | 45.71/46.29/49.64 | 77.65/84.60/83.89 | 75.21/48.88/52.80 | 81.08/72.38/75.35 | 40.51/29.27/23.50 | 63.96/73.29/79.18 |

Table 2: Correlation between SFT/RL model answer and human reference on FingER-test (Z-GRPO means Zero-GRPO)

| Method | Visual Quality | Temporal | Dynamic Degree | Text Alignment | Factual | Overall |
| --- | --- | --- | --- | --- | --- | --- |
|  | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC | Acc/SRCC/PLCC |
| GPT-4o [13] | 62.19/56.24/57.93 | 77.83/78.64/79.13 | 68.31/54.14/57.02 | 83.41/72.20/74.33 | 58.77/48.93/49.51 | 69.92/81.25/82.36 |
| VideoScore [11] | -/22.80/18.55 | -/23.84/26.06 | -/9.49/7.18 | -/19.18/13.87 | -/22.93/18.31 | -/20.39/17.68 |
| Qwen2.5-VL [1] | 25.33/40.60/40.94 | 78.72/84.51/85.44 | 72.87/56.48/56.85 | 81.6/74.09/76.49 | 58.34/57.45/58.67 | 66.50/81.23/85.26 |
| Z-GRPO (w/o prob) | 76.01/73.39/70.46 | 78.01/83.13/83.82 | 77.93/69.74/68.47 | 84.46/73.80/75.99 | 55.21/47.47/50.33 | 74.51/83.46/86.56 |
| Z-GRPO (w/ prob) | 76.01/71.83/71.97 | 78.01/81.81/83.86 | 77.93/67.49/68.51 | 84.46/74.38/76.28 | 55.21/42.21/50.15 | 74.51/83.24/86.82 |
| FingER (w/o prob) | 83.78/83.48/82.53 | 83.33/83.13/83.70 | 83.23/71.37/67.95 | 82.77/70.94/73.75 | 72.89/64.12/64.61 | 81.25/88.87/89.67 |
| FingER (w/ prob) | 83.78/85.31/85.22 | 83.33/86.24/86.99 | 83.23/77.07/74.73 | 82.77/73.85/77.98 | 72.89/70.99/69.26 | 81.25/90.23/91.41 |
| +Reason (w/o prob) | 84.05/81.51/81.00 | 84.04/85.88/86.63 | 82.49/69.22/68.22 | 86.79/77.87/79.77 | 74.03/67.47/68.41 | 82.33/89.79/91.64 |
| +Reason (w/ prob) | 84.05/83.85/83.87 | 84.04/86.51/87.09 | 82.49/76.11/76.70 | 86.79/79.34/83.16 | 74.03/71.70/70.27 | 82.33/90.31/92.04 |
| +GRPO (w/o prob) | 82.30/80.62/78.09 | 82.98/85.08/85.57 | 81.63/65.54/64.92 | 85.88/75.74/77.91 | 74.04/68.65/70.73 | 81.41/89.26/91.25 |
| +GRPO (w/ prob) | 82.30/83.76/83.51 | 82.98/86.64/87.43 | 81.63/75.05/74.68 | 85.88/78.32/82.63 | 74.04/71.87/72.03 | 81.41/90.43/92.41 |

Table 3: Zero-shot Evaluation Results on Public Benchmarks

| Method | GenAI-Bench [17] SRCC | GenAI-Bench [17] PLCC | MonetBench [37] tau | MonetBench [37] diff |
| --- | --- | --- | --- | --- |
| GPT-4o [13] | 35.79 | 36.61 | 45.70 | 48.30 |
| Qwen2.5-VL [1] | 46.62 | 44.29 | 46.70 | 44.27 |
| VideoScore [11] | 42.22 | 40.62 | 49.10 | 54.90 |
| VQAScore [19] | 52.70 | 50.60 | 56.10 | 59.50 |
| Zero-GRPO | 49.58 | 44.39 | 51.30 | 51.34 |
| FingER | 54.13 | 52.60 | 53.90 | 57.31 |
| + Reason | 56.68 | 57.25 | 57.80 | 62.07 |
| + GRPO | 57.03 | 56.59 | 58.00 | 62.80 |

# 4.4 SFT and RL Performance on FingER-test

In this section, we report the performance of our reasoning model on FingER-test under different training protocols, including SFT with answers, SFT with reasons, Zero-GRPO, and GRPO with a cold start. We also provide results for the closed-source model GPT-4o and for VideoScore [11] for comparison, as detailed in Tab. 2. Note that all these results, except for VideoScore [11], are obtained by entity-level evaluations for fair comparison.

Our model, trained with only answers, demonstrates significant performance improvements over the base model, achieving overall gains of 14.75/9.00/6.15 in Acc/SRCC/PLCC, respectively. Substantial improvements are observed in the dimensions of visual quality, dynamic degree, and factual consistency. Note that the improvement in the text alignment dimension is limited, mainly because the base model's capability in this dimension is largely inherited from its pre-training data.

Incorporating additional reasoning during training further boosts the performance, particularly in the dimensions of text alignment, factual consistency, and temporal consistency. For the text alignment dimension, SFT with reasoning yields performance gains of 4.02/5.49/5.18 in Acc/SRCC/PLCC. These improvements underscore the importance of in-depth video understanding for achieving higher performance in these dimensions.

We further investigate reasoning training using RL, which includes two kinds of training procedures: (1) Zero-GRPO, and (2) GRPO initialized with a cold start from reasoning SFT training. The results presented in Table 2 reveal that Zero-GRPO fails to predict correct answers. Upon closer examination of the training process, we identified that the issue stems from the reasoning component: Zero-GRPO generates reasons that resemble captions rather than logical reasoning. In contrast, when GRPO is applied with a cold-start initialization from our reasoning SFT model, it is able to surpass the SFT model with only 1k additional training steps. Among the dimensions, we observed steady performance improvements in temporal and factual consistency, with boosts of 1.15/0.88/2.77 in factual consistency. We believe that the reasoning cold start teaches the model to reason in a rough manner, while GRPO guides it towards adopting reasons that lead to correct answers, thereby incentivizing the reasoning capability of the model.

Moreover, we evaluate performance on our proposed FingER-test dataset with the closed-source MLLM [13] (1st row) and VideoScore [11] (2nd row); our proposed FingER outperforms those methods by a large margin across all five assessment dimensions.

Text Prompt: The camera follows a person standing alone by the lake, gazing at the distant sunset, with their reflection mirrored on the water's surface.

![](images/54976c59ce4846e46e0766e44bd28c157ead9e397c4980a5824be6ab946e240b.jpg)
Figure 4: Qualitative results. We show several reasoning results produced by our GRPO model.

# 4.5 Comparison on Public Benchmarks

Tab. 3 demonstrates the consistent improvements achieved by our method on two public benchmarks. We compare our method with GPT-4o, Qwen2.5-VL, and two other approaches. Specifically, with only Yes/No answer prediction, we already outperform all methods on GenAI-Bench, indicating the effectiveness of our fine-grained evaluation framework. Training with reasons and GRPO with a cold start leads to further improvements, with a final 8.21%/11.83% SRCC/PLCC relative performance boost.
On MonetBench, without any weight fitting, we just average scores of five dimensions, our method is able to achieve $3.39\% / 5.55\%$ relative improvements of tau/diff. It is worth noting that VideoScore [11] is trained using 37.6k training videos, while VQAScore [19] utilizes 665k samples, we outperform these methods with only 3.3k training videos without additional training samples from other sources, which is at most one-tenth of the training size adopted by other methods. + +# 5 Conclusion + +In this paper, we emphasize the critical importance of integrating fine-grained reasoning into AI-generated video quality assessment, and we propose FingER, an entity-level fine-grained quality assessment framework with five distinct evaluation dimensions for AI-generated videos. To bridge the gap between non-AI videos and AI-generated videos, we construct a high-quality dataset, FingER-Instruct-60k, which consists of 3.3k videos generated by modern T2V models and 60k entity-level question / answering / reasoning pairs. Based on this dataset, we explore multiple training protocols to best incentivize the model's reasoning capability, including reason SFT, zero GRPO and GRPO with a reasoning cold-start. Extensive experiments demonstrate that by utilizing GRPO training with a cold-start, our method not only achieves the best performance on our dataset, but also outperforms other methods and closed-source models on two public benchmarks. And it is worth noting that we achieve SOTA performance with only 3.3k training samples. + +# References + +[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923 (2025). +[2] Fan Bao, Chendong Xiang, Gang Yue, Guande He, Hongzhou Zhu, Kaiwen Zheng, Min Zhao, Shilong Liu, Yaole Wang, and Jun Zhu. 2024. Vudu: a highly consistent, dynamic and skilled text-to-video generator with diffusion models. arXiv preprint arXiv:2405.04233 (2024). +[3] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. 2024. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 17682-17690. +[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. 2024. Video generation models as world simulators. 2024. URL https://openai.com/research/video-generation-models-as-world-simulators-3 (2024), 1. +[5] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 24185-24198. +[6] Jaemin Cho, Yushi Hu, Jason M Baldridge, Roopal Garg, Peter Anderson, Ranjay Krishna, Mohit Bansal, Jordi Pont-Tuset, and Su Wang. 2024. Davidsonian Scene Graph: Improving Reliability in Fine-grained Evaluation for Text-to-Image Generation. In ICLR. +[7] Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning. arXiv preprint arXiv:2504.02546 (2025). 
+[8] Xiangxiang Chu, Limeng Qiao, Xinyu Zhang, Shuang Xu, Fei Wei, Yang Yang, Xiaofei Sun, Yiming Hu, Xinyang Lin, Bo Zhang, et al. 2024. MobilevIm v2: Faster and stronger baseline for vision language model. arXiv preprint arXiv:2402.03766 (2024). +[9] Daniel Deutsch, George Foster, and Markus Freitag. 2023. Ties Matter: Meta-Evaluating Modern Metrics with Pairwise Accuracy and Tie Calibration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 12914-12929. +[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025). +[11] Xuan He, Dongfu Jiang, Ge Zhang, Max Ku, Achint Soni, Sherman Siu, Haonan Chen, Abhranil Chandra, Ziyan Jiang, Aaran Arulraj, et al. 2024. VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 2105-2123. +[12] Hailang Huang, Yong Wang, Zixuan Huang, Huaqiu Li, Tongwen Huang, Xi-angxiang Chu, and Richong Zhang. 2024. MMGenBench: Evaluating the Limits of LMMs from the Text-to-Image Generation Perspective. arXiv preprint arXiv:2411.14062 (2024). +[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. GPT-4o System Card. arXiv preprint arXiv:2410.21276 (2024). +[14] Haoning Wu Xintao Wang Yixiao Ge Xiaodong Cun David Junhao Zhang Jia-Wei Liu Yuchao Gu Rui Zhao Weisi Lin Wynne Hsu Ying Shan Jay Zhangjie Wu, Guian Fang and Mike Zheng Shou. 2024. Towards A Better Metric for Text-to-Video Generation. arXiv:2401.07781 (2024). +[15] Solomon Kullback. 1951. Kullback-leibler divergence. +[16] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. 2024. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629 (2024). +[17] Baiqi Li, Zhiqiu Lin, Deepak Pathak, Jiayao Emily Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. GenAI-bench: A holistic benchmark for compositional text-to-visual generation. In Synthetic Data for Computer Vision Workshop@ CVPR 2024. +[18] Mingxing Li, Rui Wang, Lei Sun, Yancheng Bai, and Xiangxiang Chu. 2025. Next Token Is Enough: Realistic Image Quality and Aesthetic Scoring with Multimodal Large Language Model. arXiv preprint arXiv:2503.06141 (2025). +[19] Zhiqiu Lin, Deepak Pathak, Baiqi Li, Jiayao Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. Evaluating text-to-visual generation with image-to-text generation. In European Conference on Computer Vision. Springer, 366–384. +[20] Xinrang Ling, Chen Zhu, Meiqi Wu, Hangyu Li, Xiaokun Feng, Cundian Yang, Aiming Hao, Jiashu Zhu, Jiahong Wu, and Xiangxiang Chu. 2025. VMBench: A Benchmark for Perception-Aligned Video Motion Generation. arXiv preprint arXiv:2503.10076 (2025). + +[21] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783 (2025). +[22] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025). 
+[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseemath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024). +[24] Genmo Team. 2024. Mochi 1. https://github.com/genmoai/models. +[25] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530 (2024). +[26] Zachary Teed and Jia Deng, 2020. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. Springer, 402-419. +[27] Zicheng Zhang Chunyi Li Haoning Wu Xiongkuo Min Guangtao Zhai Tengchuan Kou, Xiaohong Liu and Ning Liu. 2024. Subjective-aligned dataset and metric for text-to-video quality assessment. arXiv preprint arXiv:2403.11956 (2024). +[28] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2016. Improved techniques for training gans. Advances in neural information processing systems, 29 (2016). +[29] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2021. Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. arXiv preprint arXiv:2104.14806 (2021). +[30] Thomas Unterthiner, Sjoerd Van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. 2019. FVD: A new metric for video generation. ICLR 2019 Workshop DeepGenStruct (2019). +[31] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Galloudec. 2020. TRL: Transformer Reinforcement Learning. https://github.com/huggingface/trl. +[32] Bram Wallace, Meihua Dang, Rafael Rafailov, Linqi Zhou, Aaron Lou, Senthil Purushwalkam, Stefano Ermon, Caiming Xiong, Shafiq Joty, and Nikhil Naik. 2024. Diffusion model alignment using direct preference optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8228-8238. +[33] Yibin Wang, Zhiyu Tan, Junyan Wang, Xiaomeng Yang, Cheng Jin, and Hao Li. 2024. Lift: Leveraging human feedback for text-to-video model alignment. arXiv preprint arXiv:2412.04814 (2024). +[34] Yibin Wang, Yuhang Zang, Hao Li, Cheng Jin, and Jiaqi Wang. 2025. Unified Reward Model for Multimodal Understanding and Generation. arXiv preprint arXiv:2503.05236 (2025). +[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824-24837. +[36] Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Liang Liao, Chunyi Li, Yixuan Gao, Annan Wang, Erli Zhang, Wenxiu Sun, et al. 2024. Q-Align: Teaching LMMs for Visual Scoring via Discrete Text-Defined Levels. In International Conference on Machine Learning. PMLR, 54015-54029. +[37] Jiazheng Xu, Yu Huang, Jiale Cheng, Yuanming Yang, Jiajun Xu, Yuan Wang, Wenbo Duan, Shen Yang, Qunlin Jin, Shurun Li, et al. 2024. Visionreward: Fine-grained multi-dimensional human preference learning for image and video generation. arXiv preprint arXiv:2412.21059 (2024). 
+[38] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024). +[39] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. Advances in neural information processing systems 36 (2023), 11809-11822. +[40] Xuebo Liu XintaoWang Yong Zhang Haoxin Chen Yang Liu Tieyong Zeng Raymond Chan Yaofang Liu, Xiaodong Cun and Ying Shan. 2024. Evalcrafter: Benchmarking and evaluating large video generation models. (2024). +[41] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is More for Reasoning. arXiv preprint arXiv:2502.03387 (2025). +[42] Ailing Zeng, Yuhang Yang, Weidong Chen, and Wei Liu. 2024. The Dawn of Video Generation: Preliminary Explorations with SORA-like Models. arXiv preprint arXiv:2410.05227 (2024). +[43] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024). + +[44] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. LlamaFactory: Unified Efficient Fine-Tuning of $100+$ Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations). Association for Computational Linguistics, Bangkok, Thailand. http://arxiv.org/abs/2403.13372 +[45] Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. 2024. Open-sora: Democratizing + +efficient video production for all. arXiv preprint arXiv:2412.20404 (2024). +[46] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. 2025. R1-Zero's "Aha Moment" in Visual Reasoning on a 2B Non-SFT Model. arXiv preprint arXiv:2503.05132 (2025). +[47] Jiashuo Yu Fan Zhang Chenyang Si Yuming Jiang Yuanhan Zhang Tianxing Wu Qingyang Jin Nattapol Chanpaisit Yaohui Wang Xinyuan Chen Limin Wang Dahua Lin Yu Qiao Ziqi Huang, Yinan He and Ziwei Liu. 2023. Vbench: Comprehensive benchmark suite for video generative models. (2023). 
\ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10358/images/21d1b61ce1471ec9165dc7c78447079aa52a214e72782751e4134ec8534961f8.jpg b/data/2025/2504_10xxx/2504.10358/images/21d1b61ce1471ec9165dc7c78447079aa52a214e72782751e4134ec8534961f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..226449c92b208f19a293e6d16be5b7510738d935 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/21d1b61ce1471ec9165dc7c78447079aa52a214e72782751e4134ec8534961f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa6076d3968444f509adca8f7922e7f05bcb479f55d06b7a0b746b4d18464ce5 +size 5782 diff --git a/data/2025/2504_10xxx/2504.10358/images/285e12cf77b75a7f0f08e2efaecc08f1bfa175f1b9a591e9fcbf7e2221583f34.jpg b/data/2025/2504_10xxx/2504.10358/images/285e12cf77b75a7f0f08e2efaecc08f1bfa175f1b9a591e9fcbf7e2221583f34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..435116d0bf19fd48b00a1f63bcc37719aa76ece1 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/285e12cf77b75a7f0f08e2efaecc08f1bfa175f1b9a591e9fcbf7e2221583f34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6807fa4155d05f631561c649d6662e2aa356b3432d5e674b68776c1d4b112e2 +size 94306 diff --git a/data/2025/2504_10xxx/2504.10358/images/3cfa156e216f49eb896b12bfa70e91b69fef85e525c85ee27a39baecb044734c.jpg b/data/2025/2504_10xxx/2504.10358/images/3cfa156e216f49eb896b12bfa70e91b69fef85e525c85ee27a39baecb044734c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfdc798c7ed071662e559b7038a60e03d725ecfb --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/3cfa156e216f49eb896b12bfa70e91b69fef85e525c85ee27a39baecb044734c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3495ce35c095df0c1bf2ce32fc2ab3f7cdfa87d0e32590db70e79367d87315ea +size 21430 diff --git a/data/2025/2504_10xxx/2504.10358/images/54976c59ce4846e46e0766e44bd28c157ead9e397c4980a5824be6ab946e240b.jpg b/data/2025/2504_10xxx/2504.10358/images/54976c59ce4846e46e0766e44bd28c157ead9e397c4980a5824be6ab946e240b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b9e994541775166bd670c899533bcb900f839de --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/54976c59ce4846e46e0766e44bd28c157ead9e397c4980a5824be6ab946e240b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff3fa91c56c2b0283fc6c0db5cd23ff39324a88b62d5efc7af2956033c7eca33 +size 285083 diff --git a/data/2025/2504_10xxx/2504.10358/images/6c368eff57a1587f634151e0ea0f31e91543a4695b86703b28b55da63364ce56.jpg b/data/2025/2504_10xxx/2504.10358/images/6c368eff57a1587f634151e0ea0f31e91543a4695b86703b28b55da63364ce56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e5a09ec225098ade6d7848371b2f672dc619098 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/6c368eff57a1587f634151e0ea0f31e91543a4695b86703b28b55da63364ce56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:427ec4453711233aa8c62b4e247c21f271efea3fac24280d6d951847ae77b28c +size 3802 diff --git a/data/2025/2504_10xxx/2504.10358/images/6d9b46e320685a8146cf9dbb86eca159ea5cbe074d46e322ec6947428651c4a5.jpg b/data/2025/2504_10xxx/2504.10358/images/6d9b46e320685a8146cf9dbb86eca159ea5cbe074d46e322ec6947428651c4a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1e205fadd98361876f6d82531b98fdbf5f17cd6 --- /dev/null +++ 
b/data/2025/2504_10xxx/2504.10358/images/6d9b46e320685a8146cf9dbb86eca159ea5cbe074d46e322ec6947428651c4a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4896dbaa6429c24d18e1c3838c32fc95b7d70a63a7acb327d1afadd6d2620ef +size 9581 diff --git a/data/2025/2504_10xxx/2504.10358/images/7d6921f0086a2ed04a58acfd7dec5e0ba42ca4b1e6fb8d80da3e0245f2a73809.jpg b/data/2025/2504_10xxx/2504.10358/images/7d6921f0086a2ed04a58acfd7dec5e0ba42ca4b1e6fb8d80da3e0245f2a73809.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92898663ef74dde5d86f3b560d31cd38d505f3b2 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/7d6921f0086a2ed04a58acfd7dec5e0ba42ca4b1e6fb8d80da3e0245f2a73809.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6f8d1984c248a25fe457f18544f1d47cfac142e36545b511a725c25bde4b376 +size 30698 diff --git a/data/2025/2504_10xxx/2504.10358/images/7e79c5f5413e855405378af10caaf9c9af8f05fc58ac361b7acede64c70a4a2e.jpg b/data/2025/2504_10xxx/2504.10358/images/7e79c5f5413e855405378af10caaf9c9af8f05fc58ac361b7acede64c70a4a2e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..707b558c92a662422ae95cf4630bf4294234ac2e --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/7e79c5f5413e855405378af10caaf9c9af8f05fc58ac361b7acede64c70a4a2e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ae94cb15a2a86f7a1e4db365f20a89e02e6d9b6ebfa47e7dd9298821e5d0d84 +size 86427 diff --git a/data/2025/2504_10xxx/2504.10358/images/931be4919122c158dcf6665927fa00c2f4c724ccbd325b5d3ac838b2117721f4.jpg b/data/2025/2504_10xxx/2504.10358/images/931be4919122c158dcf6665927fa00c2f4c724ccbd325b5d3ac838b2117721f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a463fc71f13d59a2a94a641d50c62e5d648caf20 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/931be4919122c158dcf6665927fa00c2f4c724ccbd325b5d3ac838b2117721f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff9b2c0ebfdbae3bfa130836970224952c938e9ea88749857901f092359e6617 +size 4008 diff --git a/data/2025/2504_10xxx/2504.10358/images/9e3526d378290d0e5f1f5b7280de9f4ac5ef4c8a53b37682de9d2eef40d2c009.jpg b/data/2025/2504_10xxx/2504.10358/images/9e3526d378290d0e5f1f5b7280de9f4ac5ef4c8a53b37682de9d2eef40d2c009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29d1658ff3763fa36f4b523a2a5a2b4aeb111921 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/9e3526d378290d0e5f1f5b7280de9f4ac5ef4c8a53b37682de9d2eef40d2c009.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14f64e47ce3f61744c234eec7d060f011366f4e578c738071447167c5e859b48 +size 56850 diff --git a/data/2025/2504_10xxx/2504.10358/images/acaf47f4e344d1323fd7320c57e4b5c9bf3d9b4b326a8d15a3173125d51c0a6a.jpg b/data/2025/2504_10xxx/2504.10358/images/acaf47f4e344d1323fd7320c57e4b5c9bf3d9b4b326a8d15a3173125d51c0a6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5367f5ec5f41612b5f693641baa34573929e485 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/acaf47f4e344d1323fd7320c57e4b5c9bf3d9b4b326a8d15a3173125d51c0a6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ec011ae1bafb96c002e9de427c0fc190fb99f61ae44393b724c083c8bd57e63 +size 3907 diff --git a/data/2025/2504_10xxx/2504.10358/images/af4c5252e3a743d7a1ba6d4c688efce7265a4c7b40a1e0158ee15f39758821a2.jpg 
b/data/2025/2504_10xxx/2504.10358/images/af4c5252e3a743d7a1ba6d4c688efce7265a4c7b40a1e0158ee15f39758821a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34548dd843427a38fc84bca8b07640b558573ec4 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/af4c5252e3a743d7a1ba6d4c688efce7265a4c7b40a1e0158ee15f39758821a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfeb610a5145eed5c003f744492ce373650be808920e95841cf5f501785eb375 +size 90010 diff --git a/data/2025/2504_10xxx/2504.10358/images/af95146f37d8f8cfd8f5a3dc449b59de02504308b77779359ca149c3c1e29eb8.jpg b/data/2025/2504_10xxx/2504.10358/images/af95146f37d8f8cfd8f5a3dc449b59de02504308b77779359ca149c3c1e29eb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dddb864795803007fd0b8961099622e620b84f30 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/af95146f37d8f8cfd8f5a3dc449b59de02504308b77779359ca149c3c1e29eb8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0aff51e377d28423e817a11a3d4692832d4cc95d6f378e2bb77907bdd105883 +size 5715 diff --git a/data/2025/2504_10xxx/2504.10358/images/b604e40b0e43e7e7d3626cdcf8c4c1cf81f25fd04f076856eeb0e100a4eaf9cf.jpg b/data/2025/2504_10xxx/2504.10358/images/b604e40b0e43e7e7d3626cdcf8c4c1cf81f25fd04f076856eeb0e100a4eaf9cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66e436f17ef70d58d3785473b52e71006616a3ab --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/b604e40b0e43e7e7d3626cdcf8c4c1cf81f25fd04f076856eeb0e100a4eaf9cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49daf03cf08b8aaccd638e9221c10e10975f47dbf37260db715cbc3da924e5e4 +size 5194 diff --git a/data/2025/2504_10xxx/2504.10358/images/c2b0ea46875f24170908414dc8e28e26b4c4eee5428ebc2ca624a2386fc7fac9.jpg b/data/2025/2504_10xxx/2504.10358/images/c2b0ea46875f24170908414dc8e28e26b4c4eee5428ebc2ca624a2386fc7fac9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b2529995b309ebf632b420c1db93e50d8afe049 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/c2b0ea46875f24170908414dc8e28e26b4c4eee5428ebc2ca624a2386fc7fac9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac1a7e13d59e09dcdb31f2212dfa2f4c477b4db9b5b7953f1e5256c77c651dd8 +size 6602 diff --git a/data/2025/2504_10xxx/2504.10358/images/c31a7b2de430267fedcf4b392ea6c547efde7a16d17b2e3d84f8d6eb0d7393c5.jpg b/data/2025/2504_10xxx/2504.10358/images/c31a7b2de430267fedcf4b392ea6c547efde7a16d17b2e3d84f8d6eb0d7393c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8269d4dfa813108e79fcb765391c6ee239d15a56 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/c31a7b2de430267fedcf4b392ea6c547efde7a16d17b2e3d84f8d6eb0d7393c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e94c14dbcf7c2f2382f24f81d8fc00295f102408a824a0a9acc79a109d1b581 +size 54099 diff --git a/data/2025/2504_10xxx/2504.10358/images/cffb428c183d85a0a3186df2401038a07ae8b917ead5d2d8c9f0ba15d870fcbb.jpg b/data/2025/2504_10xxx/2504.10358/images/cffb428c183d85a0a3186df2401038a07ae8b917ead5d2d8c9f0ba15d870fcbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c287315cff4c967638c60b9565d356cb80f6288 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/cffb428c183d85a0a3186df2401038a07ae8b917ead5d2d8c9f0ba15d870fcbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3b34a0c61e0f9b44edd8a01a973b6c9762741ebfd3e9458135038ac66ade94c0 +size 6374 diff --git a/data/2025/2504_10xxx/2504.10358/images/d6c78e9f47e8d65b14e469d84b0a2f1198e97c98f93d261b872accb21570a54a.jpg b/data/2025/2504_10xxx/2504.10358/images/d6c78e9f47e8d65b14e469d84b0a2f1198e97c98f93d261b872accb21570a54a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ebddfc766e09580c10165b2ae374bd708d8ce3c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/d6c78e9f47e8d65b14e469d84b0a2f1198e97c98f93d261b872accb21570a54a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b397ef9765aa2f9843139d142c5e1a301459245ac1886cd4047665920833ca76 +size 3379 diff --git a/data/2025/2504_10xxx/2504.10358/images/d9b02a1b058829ef36e4a6b35714adc0c10256dc2c1d78a1cda461ad9c1d4ccf.jpg b/data/2025/2504_10xxx/2504.10358/images/d9b02a1b058829ef36e4a6b35714adc0c10256dc2c1d78a1cda461ad9c1d4ccf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5512d562af8ff21ce531cb65282edc64af9280ec --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/d9b02a1b058829ef36e4a6b35714adc0c10256dc2c1d78a1cda461ad9c1d4ccf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dfd277c744442918c353362c794d3218ad638b369027336b181245441040f28 +size 166793 diff --git a/data/2025/2504_10xxx/2504.10358/images/df5f05a6a00514e1b1854a46e4bce0c8c5272fdf30dac6a9895fb987eb882c7e.jpg b/data/2025/2504_10xxx/2504.10358/images/df5f05a6a00514e1b1854a46e4bce0c8c5272fdf30dac6a9895fb987eb882c7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5ba47417e6b0ed4c06e63b6727d6b2d57a71163 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/df5f05a6a00514e1b1854a46e4bce0c8c5272fdf30dac6a9895fb987eb882c7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aacc7cce7dc9c4bbe0fd18fc8cae2b24757e0ea36b2daa9cc30759a630bf29db +size 5412 diff --git a/data/2025/2504_10xxx/2504.10358/images/f6db93766cb5f466189876315472945b0af1547224588b7f6baaa83e5488d4b6.jpg b/data/2025/2504_10xxx/2504.10358/images/f6db93766cb5f466189876315472945b0af1547224588b7f6baaa83e5488d4b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b1df2d3050a4a251c956b622898b43432fc0b08 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/images/f6db93766cb5f466189876315472945b0af1547224588b7f6baaa83e5488d4b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:081ea96004caa0d2c99c040a776ea7ee3dae5076d56f9dff0852c4601497fbd7 +size 26773 diff --git a/data/2025/2504_10xxx/2504.10358/layout.json b/data/2025/2504_10xxx/2504.10358/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3f5b56fe634fc5d9fb5ef59a501a1d486b667d76 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10358/layout.json @@ -0,0 +1,7591 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 53, + 80, + 558, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 80, + 558, + 118 + ], + "spans": [ + { + "bbox": [ + 53, + 80, + 558, + 118 + ], + "type": "text", + "content": "FingER: Content Aware Fine-grained Evaluation with Reasoning for AI-Generated Videos" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 125, + 165, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 125, + 165, + 137 + ], + "spans": [ + { + "bbox": [ + 118, + 125, + 165, + 137 + ], + "type": "text", + "content": "Rui Chen" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 138, + 
205, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 138, + 205, + 149 + ], + "spans": [ + { + "bbox": [ + 77, + 138, + 205, + 149 + ], + "type": "text", + "content": "chenrui.chen@alibaba-inc.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 94, + 150, + 188, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 150, + 188, + 162 + ], + "spans": [ + { + "bbox": [ + 94, + 150, + 188, + 162 + ], + "type": "text", + "content": "AMAP, Alibaba Group" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 163, + 171, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 163, + 171, + 174 + ], + "spans": [ + { + "bbox": [ + 111, + 163, + 171, + 174 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 286, + 125, + 325, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 125, + 325, + 137 + ], + "spans": [ + { + "bbox": [ + 286, + 125, + 325, + 137 + ], + "type": "text", + "content": "Lei Sun" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 256, + 138, + 355, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 138, + 355, + 149 + ], + "spans": [ + { + "bbox": [ + 256, + 138, + 355, + 149 + ], + "type": "text", + "content": "ally.sl@alibaba-inc.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 258, + 150, + 352, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 150, + 352, + 162 + ], + "spans": [ + { + "bbox": [ + 258, + 150, + 352, + 162 + ], + "type": "text", + "content": "AMAP, Alibaba Group" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "spans": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 444, + 125, + 494, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 444, + 125, + 494, + 138 + ], + "spans": [ + { + "bbox": [ + 444, + 125, + 494, + 138 + ], + "type": "text", + "content": "Jing Tang" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 410, + 139, + 529, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 139, + 529, + 149 + ], + "spans": [ + { + "bbox": [ + 410, + 139, + 529, + 149 + ], + "type": "text", + "content": "guangyu.tj@alibaba-inc.com" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 423, + 150, + 516, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 423, + 150, + 516, + 162 + ], + "spans": [ + { + "bbox": [ + 423, + 150, + 516, + 162 + ], + "type": "text", + "content": "AMAP, Alibaba Group" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 440, + 163, + 500, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 163, + 500, + 174 + ], + "spans": [ + { + "bbox": [ + 440, + 163, + 500, + 174 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 203, + 183, + 243, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 183, + 243, + 196 + ], + "spans": [ + { + "bbox": [ + 203, + 183, + 243, + 196 + ], + "type": "text", + "content": "Geng Li" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 163, + 196, + 283, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 196, + 283, + 208 
+ ], + "spans": [ + { + "bbox": [ + 163, + 196, + 283, + 208 + ], + "type": "text", + "content": "xiaofeng/lg@alibaba-inc.com" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 176, + 209, + 269, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 209, + 269, + 220 + ], + "spans": [ + { + "bbox": [ + 176, + 209, + 269, + 220 + ], + "type": "text", + "content": "AMAP, Alibaba Group" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 193, + 221, + 252, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 221, + 252, + 232 + ], + "spans": [ + { + "bbox": [ + 193, + 221, + 252, + 232 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 346, + 183, + 428, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 183, + 428, + 196 + ], + "spans": [ + { + "bbox": [ + 346, + 183, + 428, + 196 + ], + "type": "text", + "content": "Xiangxiang Chu" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 196, + 463, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 196, + 463, + 207 + ], + "spans": [ + { + "bbox": [ + 312, + 196, + 463, + 207 + ], + "type": "text", + "content": "chuxiangxiang.cxx@alibaba-inc.com" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 340, + 209, + 433, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 209, + 433, + 220 + ], + "spans": [ + { + "bbox": [ + 340, + 209, + 433, + 220 + ], + "type": "text", + "content": "AMAP, Alibaba Group" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 358, + 221, + 417, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 221, + 417, + 232 + ], + "spans": [ + { + "bbox": [ + 358, + 221, + 417, + 232 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 51, + 239, + 96, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 239, + 96, + 251 + ], + "spans": [ + { + "bbox": [ + 51, + 239, + 96, + 251 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "spans": [ + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "type": "text", + "content": "Recent advances in video generation have posed great challenges in the assessment of AI-generated content, particularly with the emergence of increasingly sophisticated models. The various inconsistencies and defects observed in such videos are inherently complex, making overall scoring notoriously difficult. In this paper, we emphasize the critical importance of integrating fine-grained reasoning into video evaluation, and we propose FingER, a novel entity-level reasoning evaluation framework that first automatically generates Fine-grained Entity-level questions, and then answers those questions by a Reasoning model with scores, which can be subsequently weighted summed to an overall score for different applications. Specifically, we leverage LLMs to derive entity-level questions across five distinct perspectives, which (i) often focus on some specific entities of the content, thereby making answering or scoring much easier by MLLMs, and (ii) are more interpretable. Then we construct a FingER dataset, consisting of approximately 3.3k videos and corresponding 60k fine-grained QA annotations, each with detailed reasons. 
Based on that, we further investigate various training protocols to best incentivize the reasoning capability of MLLMs for correct answer prediction. Extensive experiments demonstrate that a reasoning model trained using Group Relative Policy Optimization (GRPO) with a cold-start strategy achieves the best performance. Notably, our model surpasses existing methods by a relative margin of " + }, + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "type": "inline_equation", + "content": "11.8\\%" + }, + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "type": "text", + "content": " on GenAI-Bench and " + }, + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "type": "inline_equation", + "content": "5.5\\%" + }, + { + "bbox": [ + 50, + 254, + 296, + 550 + ], + "type": "text", + "content": " on Monet-Bench with only 3.3k training videos, which is at most one-tenth of the training samples utilized by other methods. Our code and dataset will be released soon." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 51, + 563, + 134, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 563, + 134, + 574 + ], + "spans": [ + { + "bbox": [ + 51, + 563, + 134, + 574 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 50, + 578, + 295, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 578, + 295, + 676 + ], + "spans": [ + { + "bbox": [ + 50, + 578, + 295, + 676 + ], + "type": "text", + "content": "Recent advancements in Text-to-Video (T2V) generative models [2, 4, 45] have demonstrated significant progress in producing visually appealing and content-rich videos. For instance, post-Sora models such as Kling have shown the ability to generate high-resolution videos that closely adhere to textual prompts. However, these models often produce localized artifacts, inconsistencies, and violations of physical laws. These issues highlight the necessity for the development of robust and reliable quality assessment methods for AI-generated video content." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "type": "text", + "content": "Early research on evaluating AI-generated videos has primarily relied on feature-based metrics, such as the Frechet Video Distance (FVD) [30] and optical flow-based methods like RAFT [26]. 
While" + } + ] + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 313, + 239, + 559, + 327 + ], + "blocks": [ + { + "bbox": [ + 313, + 239, + 559, + 327 + ], + "lines": [ + { + "bbox": [ + 313, + 239, + 559, + 327 + ], + "spans": [ + { + "bbox": [ + 313, + 239, + 559, + 327 + ], + "type": "image", + "image_path": "7d6921f0086a2ed04a58acfd7dec5e0ba42ca4b1e6fb8d80da3e0245f2a73809.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 329, + 439, + 337 + ], + "lines": [ + { + "bbox": [ + 430, + 329, + 439, + 337 + ], + "spans": [ + { + "bbox": [ + 430, + 329, + 439, + 337 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 316, + 338, + 559, + 464 + ], + "blocks": [ + { + "bbox": [ + 316, + 338, + 559, + 464 + ], + "lines": [ + { + "bbox": [ + 316, + 338, + 559, + 464 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 559, + 464 + ], + "type": "image", + "image_path": "9e3526d378290d0e5f1f5b7280de9f4ac5ef4c8a53b37682de9d2eef40d2c009.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 467, + 359, + 475 + ], + "lines": [ + { + "bbox": [ + 350, + 467, + 359, + 475 + ], + "spans": [ + { + "bbox": [ + 350, + 467, + 359, + 475 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 427, + 467, + 436, + 475 + ], + "lines": [ + { + "bbox": [ + 427, + 467, + 436, + 475 + ], + "spans": [ + { + "bbox": [ + 427, + 467, + 436, + 475 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 512, + 467, + 522, + 475 + ], + "lines": [ + { + "bbox": [ + 512, + 467, + 522, + 475 + ], + "spans": [ + { + "bbox": [ + 512, + 467, + 522, + 475 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 486, + 560, + 574 + ], + "lines": [ + { + "bbox": [ + 314, + 486, + 560, + 574 + ], + "spans": [ + { + "bbox": [ + 314, + 486, + 560, + 574 + ], + "type": "text", + "content": "Figure 1: Advanced generation models often exhibit localized defects while maintaining overall visually appealing, as illustrated in (a), which requires fine-grained in-depth understanding. (b) and (c) show that even with detailed instructional prompts and entity-level questions, GPT-4o still fails to identify this hand deformation. (d) shows the effectiveness of our work by integrating reasoning model with fine-grained entity-level questions." + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 599, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 599, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 599, + 559, + 710 + ], + "type": "text", + "content": "these methods effectively assess overall visual quality and dynamic characteristics, they fall short in capturing nuanced aspects that require deeper semantic understanding and fine-grained reasoning. To address these limitations, recent studies have introduced MLLMs for more comprehensive evaluations. For example, VideoScore [11] proposes a framework that evaluates five distinct aspects of video quality using an MLLM to assign scores ranging from 1 to 4. 
VisionReward [37] aligns video generation with human perception by formulating predefined judgment questions and fine-tuning a video-based MLLM to compute weighted scores. Similarly, LiFT [33]" + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.10358v1 [cs.CV] 14 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 85, + 294, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 85, + 294, + 117 + ], + "spans": [ + { + "bbox": [ + 51, + 85, + 294, + 117 + ], + "type": "text", + "content": "learns a reward model that provides reasons and scores across multiple aspects to align the generation model with human preferences. Despite these advancements, two key challenges persist:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 118, + 295, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 118, + 295, + 380 + ], + "spans": [ + { + "bbox": [ + 50, + 118, + 295, + 380 + ], + "type": "text", + "content": "(i) Inadequacy of Fine-grained Video Reasoning: Although advanced generative models have significantly improved global visual quality by reducing issues such as blurriness and flickering, they still exhibit localized spatiotemporal inconsistencies, distortions, unnatural artifacts, and violations of physical laws, especially in scenarios involving complex motion or multiple entities. For instance, Fig 1(a) shows a video generated by Pixverse that, despite its high overall visual appeal, contains a noticeably deformed hand in a localized area. This example underscores the need for more fine-grained and context-aware reasoning capabilities in video understanding, moving beyond superficial visual pattern recognition to incorporate temporally grounded and semantically rich analysis. (ii) Domain Gap in AI-Generated Videos: Current state-of-the-art MLLMs struggle to capture the intrinsic characteristics of AI-generated videos, even with well-defined prompts. As illustrated in Fig 1(b) and (c), GPT-4o misidentifies the deformed hand in a video and assigns a high score based on misleading explanations. This issue is primarily attributed to a domain gap between the training data used by MLLMs and the unique features of AI-generated videos. In essence, AI-generated videos can deceive MLLMs in certain latent feature spaces. Bridging this gap requires a high-quality dataset of AI-generated videos. Moreover, developing strategies to enhance the generalization of MLLMs to AI-generated videos remains an open challenge." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 380, + 295, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 380, + 295, + 578 + ], + "spans": [ + { + "bbox": [ + 50, + 380, + 295, + 578 + ], + "type": "text", + "content": "Inspired by the Question Generation and Answering (QG/A) framework [6] and recent reasoning works [7, 21, 22, 46] that demonstrate a significant self-emergence of complex cognitive reasoning abilities induced by Deepseek R1 [10], we argue that incorporating fine-grained reasoning abilities would significantly enhance the video quality assessment. 
In this paper, we propose FingER, a novel framework that first decomposes the overall evaluation into fine-grained entity-level questions and then answers these questions with corresponding scores by a reasoning model, which is fine-tuned on our high-quality dataset using GRPO with a cold-start initialization. Specifically, we employ five distinct aspects as defined in VideoScore [11], including text-to-video alignment, temporal consistency, factual consistency, dynamic degree, and visual quality. By deriving such fine-grained entity-level questions, our framework not only enables the model to explicitly focus on specific characteristics of certain entities, thereby facilitating a more fine-grained understanding, but also enhances interpretability through these structured QA pairs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 578, + 295, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 578, + 295, + 698 + ], + "spans": [ + { + "bbox": [ + 50, + 578, + 295, + 698 + ], + "type": "text", + "content": "Based on these questions, we prompted several strong MLLMs [13, 25] to provide answers. However, we observed that these models struggle to provide correct answers, particularly in aspects like factual consistency. As stated before, we attribute this to the lack of high-quality AI-generated video datasets and the inadequate reasoning capabilities of current models. Therefore, we curated a fine-grained AI-generated video reasoning dataset, FingER-Instruct-60k, which consists of " + }, + { + "bbox": [ + 50, + 578, + 295, + 698 + ], + "type": "inline_equation", + "content": "3.3\\mathrm{k}" + }, + { + "bbox": [ + 50, + 578, + 295, + 698 + ], + "type": "text", + "content": " AI-generated videos sourced from advanced generation models like Kling, Luma, Vidu, PixVerse, CogVideoX [38], etc. For each video, we generate fine-grained questions and annotate them with 'Yes/No'. To ease human labor and" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 85, + 559, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 85, + 559, + 183 + ], + "spans": [ + { + "bbox": [ + 313, + 85, + 559, + 183 + ], + "type": "text", + "content": "also reduce potential errors, we leverage MLLMs to generate detailed reasoning explanations given each question and its answer. (Note that, while MLLMs often struggle to answer these questions correctly, they demonstrate higher possibilities of producing coherent reasoning when the answer is explicitly provided, suggesting the presence of underlying reasoning capabilities.) These generated reasons were subsequently re-checked and refined by human annotators to ensure accuracy and quality. At last, we collect 60k fine-grained QA annotations with high-quality detailed reasons." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 184, + 559, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 184, + 559, + 293 + ], + "spans": [ + { + "bbox": [ + 313, + 184, + 559, + 293 + ], + "type": "text", + "content": "To enhance the video reasoning capabilities, we choose Qwen2.5-VL [1], and explore multiple training protocols on our dataset, including directly training with answers, training with reasons, zero GRPO training and GRPO training with a cold-start initialization. 
Our experiments reveal that integrating high-quality reasons can largely increase the performance along with the interpretability, and GRPO with cold-start can further enhance its performance, especially in dimensions that require in-depth understanding. We also test our reasoning model in a zero-shot manner on public benchmarks, and still consistently achieve state-of-the-art performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 293, + 559, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 293, + 559, + 337 + ], + "spans": [ + { + "bbox": [ + 313, + 293, + 559, + 337 + ], + "type": "text", + "content": "In summary, we propose an entity-level quality assessment framework with strong reasoning and generalization capabilities. To the best of our knowledge, our work is the first to introduce entity-level reasoning into the quality assessment of AI-generated videos." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 324, + 338, + 508, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 338, + 508, + 347 + ], + "spans": [ + { + "bbox": [ + 324, + 338, + 508, + 347 + ], + "type": "text", + "content": "Our contributions can be summarized as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 331, + 351, + 559, + 624 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 331, + 351, + 559, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 351, + 559, + 438 + ], + "spans": [ + { + "bbox": [ + 331, + 351, + 559, + 438 + ], + "type": "text", + "content": "- Novel Evaluation Approach. We propose a novel evaluation approach FingER, designed for practical AI-generated video quality assessment. It comprises an entity-level question generation module and a video reasoning model that provides corresponding scores. By emphasizing fine-grained reasoning, our approach effectively addresses localized defects in AI-generated videos that require in-depth understanding and significantly enhances interpretability." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 331, + 439, + 559, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 439, + 559, + 502 + ], + "spans": [ + { + "bbox": [ + 331, + 439, + 559, + 502 + ], + "type": "text", + "content": "- Fine-grained Reasoning Dataset. We present a new dataset for AI-generated video reasoning, containing 3.3k videos and 60k entity-level QA annotations sourced from advanced generation models. Each QA pair is annotated with detailed reasons. This dataset aims to further advance research in this field." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 331, + 504, + 559, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 504, + 559, + 570 + ], + "spans": [ + { + "bbox": [ + 331, + 504, + 559, + 570 + ], + "type": "text", + "content": "- Enhanced Training Protocols. We explore multiple training protocols to enhance the fine-grained video reasoning capabilities of MLLMs. 
Notably, we are the first to introduce GRPO training into AI-generated video quality assessment, which proves to be highly effective in improving both reasoning and generalization abilities" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 331, + 571, + 559, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 571, + 559, + 624 + ], + "spans": [ + { + "bbox": [ + 331, + 571, + 559, + 624 + ], + "type": "text", + "content": "- **Strong Performance.** Extensive experiments demonstrate the effectiveness of our approach. We achieve state-of-the-art performance on public benchmarks using only one-tenth of the training videos, thereby highlighting the superior generalization capability of our model." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 635, + 403, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 635, + 403, + 647 + ], + "spans": [ + { + "bbox": [ + 315, + 635, + 403, + 647 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 651, + 473, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 651, + 473, + 663 + ], + "spans": [ + { + "bbox": [ + 315, + 651, + 473, + 663 + ], + "type": "text", + "content": "2.1 Video Quality Assessment" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": "Early approaches relied on feature-based metrics, such as Fréchet Video Distance (FVD) [30], Inception Score (IS) [28], and CLIPSim [29]. And benchmark works like EvalCrafter [40] and VBench [47] introduced comprehensive evaluation frameworks with 18 and 16" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 281, + 365 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 281, + 365 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 281, + 365 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 281, + 365 + ], + "type": "image", + "image_path": "7e79c5f5413e855405378af10caaf9c9af8f05fc58ac361b7acede64c70a4a2e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 374, + 560, + 398 + ], + "lines": [ + { + "bbox": [ + 50, + 374, + 560, + 398 + ], + "spans": [ + { + "bbox": [ + 50, + 374, + 560, + 398 + ], + "type": "text", + "content": "Figure 2: The overview of our proposed FingER framework, including (a) the evaluation pipeline, (b) FingER-Instruct-60k dataset curation, and (c) GRPO training of our reasoning model." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 282, + 82, + 559, + 365 + ], + "blocks": [ + { + "bbox": [ + 282, + 82, + 559, + 365 + ], + "lines": [ + { + "bbox": [ + 282, + 82, + 559, + 365 + ], + "spans": [ + { + "bbox": [ + 282, + 82, + 559, + 365 + ], + "type": "image", + "image_path": "af4c5252e3a743d7a1ba6d4c688efce7265a4c7b40a1e0158ee15f39758821a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 414, + 295, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 414, + 295, + 436 + ], + "spans": [ + { + "bbox": [ + 50, + 414, + 295, + 436 + ], + "type": "text", + "content": "metrics, respectively. However, these methods fall short in assessing deep semantic understanding or aligning with human perception." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 436, + 295, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 436, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 50, + 436, + 295, + 601 + ], + "type": "text", + "content": "With the rapid advancement of MLLMs [1, 5, 8, 25], increasing studies have explored to leverage their capabilities to facilitate image/video quality evaluation [6, 12, 18, 19, 36]. Inspired by DSG [6], which uses question generation/answering (QG/A) for interpretable assessment, T2VScore [14] adopted a QA framework for T2V alignment. T2VQA [27] introduced the T2VQA-DB dataset, comprising 10k videos annotated with Mean Opinion Scores (MOS), and trained a transformer-based model to predict these scores. Similarly, VideoScore [11] proposed a larger dataset across five dimensions and employed a MLLM for scoring. VMBench [20] introduced perception-aligned motion metrics to evaluate motion quality. While these methods predict scores or labels, they often overlook the reasoning behind assessments, limiting their effectiveness. Our work distinguishes itself by incorporating entity-level reasoning for evaluating advanced generation models reliably." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 601, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 601, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 50, + 601, + 295, + 677 + ], + "type": "text", + "content": "Another line of research focuses on reward models for improving generative models via Reinforcement Learning from Human Feedback (RLHF), such as Diffusion-DPO [32], VisionReward [37] and UnifiedReward [34]. While these efforts target generative model optimization, our work emphasizes practical video quality evaluation, we expect it is able to further benefit the generation models using RLHF in future work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 413, + 527, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 413, + 527, + 426 + ], + "spans": [ + { + "bbox": [ + 314, + 413, + 527, + 426 + ], + "type": "text", + "content": "2.2 Reasoning Inference in Large Models" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 427, + 560, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 427, + 560, + 625 + ], + "spans": [ + { + "bbox": [ + 313, + 427, + 560, + 625 + ], + "type": "text", + "content": "Reasoning inference aims to emulate human-like thinking processes by forming the final answer through a Large Language Model (LLM). 
Specifically, to answer a given question, an LLM is required to think divergently and record the thinking processes, which are subsequently referenced when formulating the final answer. This approach has inspired a variety of research, including prompting-based Chain-of-Thought (CoT) [35], planning-based Graph-of-Thought [3] and Tree-of-Thought [39] processing, reward methods [16], and supervised fine-tuning (SFT) datasets with sufficient context [41]. Notably, DeepSeek-R1 [10] integrates specific prompts with reinforcement learning (RL), enabling the model to first generate the thinking process before producing the final answer. This method allows for supervised fine-tuning with a small amount of annotated data containing thinking processes, followed by reinforcement learning fine-tuning on more data without thinking processes. A very recent approach [7] proposes a highly simplified reinforcement learning framework and demonstrates its validity across several benchmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 639, + 373, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 639, + 373, + 651 + ], + "spans": [ + { + "bbox": [ + 315, + 639, + 373, + 651 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 654, + 560, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 560, + 711 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 560, + 711 + ], + "type": "text", + "content": "In this section, we first introduce our entity-level video quality assessment framework - FingER in Sec. 3.1. Then, we detail the data curation pipeline of our proposed dataset, namely FingER-Instruct-60k in Sec. 3.2. In the end, we combine multiple training methods with our proposed instruction tuning dataset, from the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 297, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 297, + 107 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 297, + 107 + ], + "type": "text", + "content": "basic supervised fine-tuning (SFT), to reasoning training with reinforcement learning (RL), as detailed in Sec. 3.3." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 146, + 223, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 146, + 223, + 159 + ], + "spans": [ + { + "bbox": [ + 51, + 146, + 223, + 159 + ], + "type": "text", + "content": "3.1 Entity-level VQA Framework" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 161, + 295, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 161, + 295, + 293 + ], + "spans": [ + { + "bbox": [ + 50, + 161, + 295, + 293 + ], + "type": "text", + "content": "For Text-to-Video (T2V) generation task, user input prompt is the only key instruction for generative models to understand and generate content that well-aligned with user's intent. To perform entity-level quality assessment of AI-generated videos, we start from understanding the user's prompt through extracting entities, attributes, and actions within itself. Inspired by DSG [6] in Text-to-Image (T2I) evaluation, we also utilize closed-source Large-Language-Model (LLM) to perform textual understanding and the following entity extraction. As shown in Fig. 
4, we provide abundant in-context learning (ICL) [35] examples from different video generation scenarios and formulate the final input for GPT-4o [13], in which way we can harvest more steady entity extraction results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 293, + 295, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 293, + 295, + 490 + ], + "spans": [ + { + "bbox": [ + 50, + 293, + 295, + 490 + ], + "type": "text", + "content": "With entities extracted from the user's prompt, we generate entity-level questions from five distinct video quality assessment dimensions, including visual quality, text-to-video alignment, temporal consistency, factual consistency, and dynamic degree. For each dimension, we provide a detailed explanation followed by several key points, formulating the context information when prompting the LLM. We also prepare adequate entity-level in-context learning examples, which are summarized from videos with and without obvious artifacts or hallucinations. In this way, we can help the LLMs to better understand which question should be asked when coping with a specific entity along with the given assessment dimension. In short, we break down the granularity of fine-grained video quality assessment from multi-dimensional level to entity-level. And the intuition behind entity-level question generation is that we hope fine-grained question/answering can guide the MLLM to focus on understanding the correlation between entity-level textual description and its corresponding visual appearance based on the video content." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": "After the entity-level question generation procedure, our fin-tuned MLLM answers the above questions with a simple \"Yes\" or \"No\", along with a detailed reasoning process explaining why the answer is that. Learning the logical reasoning process is critical for model performance improvements, as detailed in the experiment Sec. 4.4. The outputted reason can also be useful when conducting practical video quality assessment, which is more interpretable and user-friendly. To formulate a final score representing the overall quality of AI-generated videos, we start by calculating the probability of the answer token (\"Yes\" or \"No\") for each entity-level question to represent the entity-level score. Since there are multiple \"Yes\" and \"No\" with different formats but similar meanings in the vocabulary of our MLLM, we first gather the token set for \"Yes\" and \"No\". In this paper, we take [\"Yes\", \"yes\", \"YES\", \"Yes\", \"Yes\"] as the token set for answer \"Yes\", and [\"No\", \"no\", \"NO\", \"No\", \"No\"] for answer \"No\", denoted by " + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "inline_equation", + "content": "T_{Y}" + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "inline_equation", + "content": "T_{N}" + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": ", respectively. 
With logits from the answer token, we extract all the logit whose token id is within the token set, and apply softmax over " + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "inline_equation", + "content": "T_{Y} \\cup T_{N}" + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": ", as illustrated in Eq. 1. Then, given the entity-level question " + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": ", we can get the answer's probability " + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "inline_equation", + "content": "P(No \\mid q)" + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "inline_equation", + "content": "P(Yes \\mid q)" + }, + { + "bbox": [ + 50, + 491, + 295, + 710 + ], + "type": "text", + "content": " with a simple sum" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 315, + 87, + 330, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 87, + 330, + 96 + ], + "spans": [ + { + "bbox": [ + 315, + 87, + 330, + 96 + ], + "type": "text", + "content": "up." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 361, + 95, + 558, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 95, + 558, + 129 + ], + "spans": [ + { + "bbox": [ + 361, + 95, + 558, + 129 + ], + "type": "interline_equation", + "content": "P (N o \\mid q) = \\sum_ {\\substack {i = 1 \\\\ m}} ^ {n} \\text {S o f t m a x} (x _ {i}), x _ {i} \\in T _ {N}; \\tag{1}", + "image_path": "af95146f37d8f8cfd8f5a3dc449b59de02504308b77779359ca149c3c1e29eb8.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 361, + 128, + 512, + 155 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 128, + 512, + 155 + ], + "spans": [ + { + "bbox": [ + 361, + 128, + 512, + 155 + ], + "type": "interline_equation", + "content": "P (Y e s \\mid q) = \\sum_ {j = 1} ^ {m} S o f t m a x (y _ {j}), y _ {j} \\in T _ {Y}.", + "image_path": "b604e40b0e43e7e7d3626cdcf8c4c1cf81f25fd04f076856eeb0e100a4eaf9cf.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "spans": [ + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "type": "text", + "content": "Instead of directly using the derived probability as the entity-level score, we still need the judgment on whether the question is positive or negative. For example, given the question \"Do the attributes of the table in the video (such as size, shape, and material) align with real-world characteristics?\" from the factual consistency dimension, it is apparent that the factual consistency of the assessed video goes up with a positive \"Yes\" answer. We define this type of question as a positive one, and vice versa. 
We denote the status of an entity-level question with " + }, + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "type": "inline_equation", + "content": "q_{stat}" + }, + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "type": "inline_equation", + "content": "q_{stat}" + }, + { + "bbox": [ + 313, + 156, + 560, + 266 + ], + "type": "text", + "content": " equals 1, it means that the question is positive; otherwise, the question is negative." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 362, + 269, + 558, + 295 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 269, + 558, + 295 + ], + "spans": [ + { + "bbox": [ + 362, + 269, + 558, + 295 + ], + "type": "interline_equation", + "content": "S _ {\\text {e n t i t y}} = \\left\\{ \\begin{array}{l l} P (N o \\mid q), & \\text {i f} q _ {\\text {s t a t}} = 0; \\\\ P (Y e s \\mid q), & \\text {i f} q _ {\\text {s t a t}} = 1. \\end{array} \\right. \\tag {2}", + "image_path": "c2b0ea46875f24170908414dc8e28e26b4c4eee5428ebc2ca624a2386fc7fac9.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "spans": [ + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "content": "With the aforementioned preparations setup, we propose our entity-level score " + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "inline_equation", + "content": "S_{entity}" + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "content": ", which correlates positively with the quality of the assessed video. When the entity-level question is positive, we use the probability of the \"Yes\" answer " + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "inline_equation", + "content": "P(Yes \\mid q)" + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "content": " to represent the score it can gain. And we utilize the probability of the \"No\" answer " + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "inline_equation", + "content": "P(No \\mid q)" + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "content": " if the question is negative, as illustrated in Eq. 2. In short, our intuition behind this design is that as long as the video quality goes up with which answer, we calculate our entity-level score based on that answer's probability. Then, we utilize entity-level question/answering pairs that are under the same quality assessment dimension to formulate our dimension-level score " + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "inline_equation", + "content": "S_{dim}" + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "content": ". To be specific, we simply calculate the linear summation of multiple answers' probability " + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "inline_equation", + "content": "S_{entity}" + }, + { + "bbox": [ + 313, + 298, + 560, + 441 + ], + "type": "text", + "content": ", as illustrated in Eq. 3." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 400, + 445, + 558, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 445, + 558, + 475 + ], + "spans": [ + { + "bbox": [ + 400, + 445, + 558, + 475 + ], + "type": "interline_equation", + "content": "S _ {d i m} = \\sum_ {i = 1} ^ {N} S _ {e n t i t y} i \\tag {3}", + "image_path": "6c368eff57a1587f634151e0ea0f31e91543a4695b86703b28b55da63364ce56.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "spans": [ + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "type": "text", + "content": "In the end, we derive the overall-level score " + }, + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "type": "inline_equation", + "content": "S_{\\text{overall}}" + }, + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "type": "text", + "content": " with the weighted average of five distinct dimension scores " + }, + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "type": "inline_equation", + "content": "S_{dim}" + }, + { + "bbox": [ + 314, + 479, + 558, + 502 + ], + "type": "text", + "content": " in Eq. 4." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 392, + 505, + 558, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 505, + 558, + 533 + ], + "spans": [ + { + "bbox": [ + 392, + 505, + 558, + 533 + ], + "type": "interline_equation", + "content": "S _ {\\text {o v e r a l l}} = \\sum_ {i = 1} ^ {5} w _ {i}. S _ {\\text {d i m}} i \\tag {4}", + "image_path": "acaf47f4e344d1323fd7320c57e4b5c9bf3d9b4b326a8d15a3173125d51c0a6a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 536, + 560, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 536, + 560, + 591 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 560, + 591 + ], + "type": "text", + "content": "In short, we propose the entity-level VQA framework FingER, which consists of three parts: (i) entity-level question generation, (ii) the fine-tuned MLLM with reasoning output, and (iii) the hierarchical scoring function that converts token probability to multi-level scores." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 601, + 521, + 614 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 601, + 521, + 614 + ], + "spans": [ + { + "bbox": [ + 314, + 601, + 521, + 614 + ], + "type": "text", + "content": "3.2 Entity-level Dataset with Reasoning" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 616, + 560, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 616, + 560, + 638 + ], + "spans": [ + { + "bbox": [ + 313, + 616, + 560, + 638 + ], + "type": "text", + "content": "In this section, we introduce the construction pipeline of our entity-level instruction tuning dataset, named FingER-Instruct-60k." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 643, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 643, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 643, + 560, + 710 + ], + "type": "text", + "content": "3.2.1 Prompt and T2V Model Selection. 
Based on VideoGenEval [42] dataset, our instruction tuning dataset is composed of 420 diverse text prompts and 3.3k AI-generated videos produced by 8 modern T2V models, including closed-source models: Kling, Luma, PixVerse, Vidu, Qingying, and open-sourced models: Mochi-1 [24], CogVideoX [38], Open-Sora [45]. We utilize all 420 text" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 296, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 296, + 205 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 296, + 205 + ], + "type": "text", + "content": "prompts from the T2V session [45], which cover a diverse range of complex scenarios, including human-centric activities, material and spatial relationships, as well as animal and text generations. These prompts are derived from real-life user inputs. As for the T2V model selection, we denote models that understand and obey most of the common sense and physical laws, and generate time-consistent videos without obvious temporal distortions as the high-quality model. We select the generative models uniformly based solely on the quality of their generated videos, spanning from high-quality models to average-quality models, for a more diverse training data distribution." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 217, + 296, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 217, + 296, + 338 + ], + "spans": [ + { + "bbox": [ + 50, + 217, + 296, + 338 + ], + "type": "text", + "content": "3.2.2 Entity-level Question Generation and Annotation. Our multi-dimensional entity-level question generation starts with understanding users' input prompts and extracting the entities within. We use GPT-40 [13] for prompt understanding and entity extraction, with abundant in-context learning examples provided. Then, we perform the entity-level question generation for our five distinct assessment dimensions. For each entity, we prompt the LLM with task introduction, assessment dimension explanation with several key points to focus on, user's input prompt, the extracted entity, and the most important in-context learning examples. And we extract the generated questions with regular expression matching." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 338, + 296, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 338, + 296, + 392 + ], + "spans": [ + { + "bbox": [ + 50, + 338, + 296, + 392 + ], + "type": "text", + "content": "For data annotation, we engaged 10 professional annotators to complete the task of annotating " + }, + { + "bbox": [ + 50, + 338, + 296, + 392 + ], + "type": "inline_equation", + "content": "60\\mathrm{k}" + }, + { + "bbox": [ + 50, + 338, + 296, + 392 + ], + "type": "text", + "content": " question/answer pairs. Inter-annotator agreement was ensured through multiple rounds of small-scale pilot annotations, and the entire process took approximately one month to complete." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 404, + 295, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 404, + 295, + 536 + ], + "spans": [ + { + "bbox": [ + 50, + 404, + 295, + 536 + ], + "type": "text", + "content": "3.2.3 Reasoning Generation and Verification. We employ the powerful closed-source MLLM [25] to generate the initial version of the reasoning process. 
Specifically, we prompt the MLLM with the assessment dimension explanation, user prompt, in-context learning examples, and the entity-level question along with its human-annotated result. An interesting finding is that when the MLLM is provided with the correct answer to the entity-level question, the generated reasoning process for explaining the answer is more reasonable than when directly generating the answer and its reason. Rather than using the MLLM-generated reasoning process directly, we conduct thorough human verification to ensure the quality of our reasoning training data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 536, + 296, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 536, + 296, + 580 + ], + "spans": [ + { + "bbox": [ + 50, + 536, + 296, + 580 + ], + "type": "text", + "content": "With aforementioned entity-level questions, human-annoted answers and detailed reasoning process, we formulate our instruction tuning dataset FingER-Instruct-60k, which serves as our basis for the model training in the next section." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 595, + 274, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 595, + 274, + 608 + ], + "spans": [ + { + "bbox": [ + 51, + 595, + 274, + 608 + ], + "type": "text", + "content": "3.3 Instruction Tuning and GRPO Training" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 609, + 296, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 609, + 296, + 643 + ], + "spans": [ + { + "bbox": [ + 50, + 609, + 296, + 643 + ], + "type": "text", + "content": "We use Qwen2.5-VL-7B-Instruct [1] as our base model and apply supervised fine-tuning, SFT with reasoning and reinforcement learning on it." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 654, + 296, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 296, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 296, + 710 + ], + "type": "text", + "content": "3.3.1 Supervised Fine-Tuning. We directly train the base model on FingER-Instruct-60k, the response of model only contains \"Yes\" or \"No\" answer following the next token prediction paradigm. It means the model only needs to learn predicting the correct answer without any reasoning process. The loss function is Cross-Entropy" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 85, + 337, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 85, + 337, + 95 + ], + "spans": [ + { + "bbox": [ + 315, + 85, + 337, + 95 + ], + "type": "text", + "content": "Loss:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 392, + 94, + 559, + 124 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 94, + 559, + 124 + ], + "spans": [ + { + "bbox": [ + 392, + 94, + 559, + 124 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {C E} = - \\sum_ {i = 1} ^ {N} y _ {i} \\log \\left(p _ {i}\\right) \\tag {5}", + "image_path": "931be4919122c158dcf6665927fa00c2f4c724ccbd325b5d3ac838b2117721f4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "spans": [ + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": "3.3.2 Supervised Fine-Tuning with Reasoning. 
We also train base model on FingER-Instruct-60k, but the difference compared to Supervised Fine-Tuning is the model needs to learn predicting the correct answer within " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " answer " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " ... " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " /answer " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " tag and its reasoning processes within " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " reason " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " ... " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": "<" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " /reason " + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 314, + 128, + 559, + 216 + ], + "type": "text", + "content": " tag. We apply prompt engineering on the input tokens to reach this difference. The loss also contains the gap of reasoning processes and the gap of answers." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 221, + 561, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 221, + 561, + 287 + ], + "spans": [ + { + "bbox": [ + 314, + 221, + 561, + 287 + ], + "type": "text", + "content": "3.3.3 GRPO Training. We employ GRPO [23] to enhance reasoning inference performance, exploring two protocols: (i) Zero-GRPO, which relies solely on reinforcement learning without initial supervised data; and (ii) GRPO with cold-start Supervised Fine-Tuning, which combines initial supervised learning with subsequent reinforcement optimization." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "spans": [ + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "content": "Zero-GRPO. Zero-GRPO is an exploratory attempt that is initiated directly from Qwen-2.5-VL [1] and uses RL to implicitly improve reasoning abilities without annotated reason. 
For each video question pair, we first sample a group of outputs " + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "inline_equation", + "content": "\\{o_1,o_2,\\dots,o_G\\}" + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "content": " by old policy " + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{old}}(o_i|v,q)" + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "content": " denotes the video that needs to be evaluated, " + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "content": " denotes the question for each entity and dimension. Then update the policy model " + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 314, + 293, + 561, + 370 + ], + "type": "text", + "content": " by minimizing the following loss." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 343, + 373, + 559, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 373, + 559, + 475 + ], + "spans": [ + { + "bbox": [ + 343, + 373, + 559, + 475 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {G R P O} (\\theta) = - \\mathbb {E} [ q \\sim P (Q), \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {o l d}} (O | v, q) ] \\\\ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} | v , q)}{\\pi_ {\\theta o l d} (o _ {i} | v , q)} * A d v _ {i}, \\right. \\right. \\\\ \\left. \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid v , q\\right)}{\\pi_ {\\theta o l d} \\left(o _ {i} \\mid v , q\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) * A d v _ {i}\\right) \\tag {6} \\\\ \\left. 
+ \\beta \\mathbb {D} _ {K L} \\left(\\pi_ {\\theta} | | \\pi_ {r e f}\\right)\\right) \\\\ \\end{array}", + "image_path": "3cfa156e216f49eb896b12bfa70e91b69fef85e525c85ee27a39baecb044734c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 337, + 484, + 559, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 484, + 559, + 510 + ], + "spans": [ + { + "bbox": [ + 337, + 484, + 559, + 510 + ], + "type": "interline_equation", + "content": "\\mathbb {D} _ {K L} \\left(\\pi_ {\\theta} \\| \\pi_ {r e f}\\right) = \\frac {\\pi_ {r e f} \\left(o _ {i} | v , q\\right)}{\\pi_ {\\theta} \\left(o _ {i} | v , q\\right)} - l o g \\frac {\\pi_ {r e f} \\left(o _ {i} | v , q\\right)}{\\pi_ {\\theta} \\left(o _ {i} | v , q\\right)} - 1 \\tag {7}", + "image_path": "6d9b46e320685a8146cf9dbb86eca159ea5cbe074d46e322ec6947428651c4a5.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "spans": [ + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "text", + "content": " denotes the coefficient of Kullback-Leibler Divergence [15] between base model and policy model, " + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "text", + "content": " denotes the threshold of clip. " + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "inline_equation", + "content": "Adv_{i}" + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "text", + "content": " is the advantage which is the normalization of a group of rewards " + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "inline_equation", + "content": "\\{r_1,r_2,\\dots,r_G\\}" + }, + { + "bbox": [ + 314, + 511, + 560, + 556 + ], + "type": "text", + "content": " computed from outputs within each group:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 378, + 563, + 559, + 588 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 563, + 559, + 588 + ], + "spans": [ + { + "bbox": [ + 378, + 563, + 559, + 588 + ], + "type": "interline_equation", + "content": "A d v _ {i} = \\frac {r _ {i} - M e a n \\left\\{r _ {1} , r _ {2} , \\dots , r _ {g} \\right\\}}{\\operatorname {S t d} \\left\\{r _ {1} , r _ {2} , \\dots , r _ {G} \\right\\}} \\tag {8}", + "image_path": "21d1b61ce1471ec9165dc7c78447079aa52a214e72782751e4134ec8534961f8.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 589, + 465, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 589, + 465, + 599 + ], + "spans": [ + { + "bbox": [ + 315, + 589, + 465, + 599 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 315, + 589, + 465, + 599 + ], + "type": "text", + "content": " is composed of two reward functions:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 388, + 612, + 559, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 612, + 559, + 624 + ], + "spans": [ + { + "bbox": [ + 388, + 612, + 559, + 624 + ], + "type": "interline_equation", + "content": "r _ {i} = r _ {\\text {a c c u r a c y} _ {i}} + r _ {\\text {f o r m a t} _ {i}} \\tag {9}", + "image_path": "d6c78e9f47e8d65b14e469d84b0a2f1198e97c98f93d261b872accb21570a54a.jpg" + 
} + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 367, + 627, + 559, + 655 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 627, + 559, + 655 + ], + "spans": [ + { + "bbox": [ + 367, + 627, + 559, + 655 + ], + "type": "interline_equation", + "content": "r _ {\\text {a c c u r a c y} _ {i}} = \\left\\{ \\begin{array}{l l} 1. 0 & \\text {i f a n s w e r} _ {i} = G T _ {i} \\\\ 0. 0 & \\text {e l s e} \\end{array} \\right. \\tag {10}", + "image_path": "df5f05a6a00514e1b1854a46e4bce0c8c5272fdf30dac6a9895fb987eb882c7e.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 350, + 658, + 559, + 686 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 658, + 559, + 686 + ], + "spans": [ + { + "bbox": [ + 350, + 658, + 559, + 686 + ], + "type": "interline_equation", + "content": "r _ {\\text {f o r m a t} i} = \\left\\{ \\begin{array}{l l} 1. 0 & \\text {i f} o _ {i} \\text {i n c l u d e s c o r r e c t f o r m a t} \\\\ 0. 0 & \\text {e l s e} \\end{array} \\right. \\tag {11}", + "image_path": "cffb428c183d85a0a3186df2401038a07ae8b917ead5d2d8c9f0ba15d870fcbb.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 688, + 517, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 688, + 517, + 698 + ], + "spans": [ + { + "bbox": [ + 315, + 688, + 517, + 698 + ], + "type": "text", + "content": "Correct format means the output " + }, + { + "bbox": [ + 315, + 688, + 517, + 698 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 315, + 688, + 517, + 698 + ], + "type": "text", + "content": " contains two tags:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 325, + 699, + 550, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 699, + 550, + 709 + ], + "spans": [ + { + "bbox": [ + 325, + 699, + 550, + 709 + ], + "type": "inline_equation", + "content": "< \\text{answer}>\\ldots < / \\text{answer}>" + }, + { + "bbox": [ + 325, + 699, + 550, + 709 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 325, + 699, + 550, + 709 + ], + "type": "inline_equation", + "content": "< \\text{reason}>\\ldots < / \\text{reason}>" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 84, + 294, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 84, + 294, + 108 + ], + "spans": [ + { + "bbox": [ + 51, + 84, + 294, + 108 + ], + "type": "text", + "content": "\"Yes\" or \"No\" token only appears within the answer tag, and the reasoning process only appears within reason tag." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 112, + 295, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 112, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 50, + 112, + 295, + 201 + ], + "type": "text", + "content": "GRPO with cold-start Supervised Fine-Tuning. DeepSeek-R1 demonstrated that fine-tuning on an annotated dataset with reasoning processes before applying reinforcement learning (RL) yields better performance than directly using RL [10]. We adopt this approach in our supervised fine-tuning model. The sole difference between Zero-GRPO and GRPO with cold-start Supervised Fine-Tuning lies in the base model: the latter is initialized from a model pre-trained on annotated data containing reasoning processes." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 209, + 134, + 221 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 209, + 134, + 221 + ], + "spans": [ + { + "bbox": [ + 51, + 209, + 134, + 221 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 225, + 237, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 225, + 237, + 236 + ], + "spans": [ + { + "bbox": [ + 51, + 225, + 237, + 236 + ], + "type": "text", + "content": "4.1 Datasets and Evaluation Metrics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 239, + 295, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 239, + 295, + 373 + ], + "spans": [ + { + "bbox": [ + 50, + 239, + 295, + 373 + ], + "type": "text", + "content": "4.1.1 Datasets. We split 185 generated videos (around " + }, + { + "bbox": [ + 50, + 239, + 295, + 373 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 50, + 239, + 295, + 373 + ], + "type": "text", + "content": " of whole data) with 3.5k entity-level questions from 5 distinct quality assessment dimensions to formulate our FingER-test dataset. Regarding the public benchmarks, we adopt the popular GenAI-Bench [17] and recently released MonetBench [37] for performance evaluation. GenAI-Bench contains 800 unique text prompts paired with 4 T2V models, and each generated video has MOS (Mean Opinion Scores) annotated by 3 annotators. MonetBench consists of 1000 different text prompts, each paired with 2 T2V models. Each pair of videos is generated with the same prompt but different video generation models. MonetBench annotates the video pair with human preferences, including \"win\", \"lose\", and \"tie\" options." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "type": "text", + "content": "4.1.2 Evaluation Metrics. We report the accuracy (Acc) of \"Yes\" or \"No\" answers, the Pearson linear correlation coefficient (PLCC), and the Spearman rank correlation coefficient (SRCC) on our proposed FingER-test dataset. We evaluate our models with and without token probability calculation, denoted by " + }, + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "type": "inline_equation", + "content": "(w / o\\text{prob})" + }, + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "type": "inline_equation", + "content": "(w/\\text{prob})" + }, + { + "bbox": [ + 50, + 376, + 295, + 486 + ], + "type": "text", + "content": " in Tab. 1 and Tab. 2. Following previous works in [11, 19], we utilize the SRCC and the PLCC for evaluating model's performance on GenAI-Bench. And we use pairwise accuracy as the metrics for human preference evaluation on MonetBench and report tau and diff, followed [9, 43]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 496, + 197, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 496, + 197, + 508 + ], + "spans": [ + { + "bbox": [ + 51, + 496, + 197, + 508 + ], + "type": "text", + "content": "4.2 Implementation Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "spans": [ + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": "Based on Qwen-2.5-VL-7B [1], we fine-tune our model with the following experiment settings: learning rate of " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "5.0\\mathrm{e - 6}" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": ", global batch size of 32, video input fps (frame-per-second) is set to 2, and video maximum input resolution is set to " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "448\\times 448" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": " pixels. We utilize LLaMA-Factory [44] as our supervised fine-tuning (SFT) codebase. We perform SFT on our proposed FingER-Instruct-60k dataset for 2 epochs with 8 NVIDIA H20 GPUs, and the training steps are the same for the model trained with extra reasoning process. As for the settings of our reinforcement learning (RL) experiments, we employ Huggingface-TRL [31] as our RL fine-tuning tool with following hyper-parameters to implement GRPO: " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "\\beta = 0.04" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": ", and the number of group " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "G = 16" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.2" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "\\mu = 1" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": ", the initial learning rate of RL is " + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "inline_equation", + "content": "5.0\\mathrm{e - 7}" + }, + { + "bbox": [ + 50, + 510, + 295, + 663 + ], + "type": "text", + "content": ". We train Zero-GRPO and GRPO with cold-start for 2k steps on 4 NVIDIA H20 GPUs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 673, + 270, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 673, + 270, + 685 + ], + "spans": [ + { + "bbox": [ + 51, + 673, + 270, + 685 + ], + "type": "text", + "content": "4.3 Zero-shot Performance on FingER-test" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 687, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 687, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 51, + 687, + 294, + 710 + ], + "type": "text", + "content": "We report the zero-shot performance of Qwen2.5-VL across five dimensions on our dataset. 
Through ablations on resolution, frame" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 317, + 81, + 558, + 189 + ], + "blocks": [ + { + "bbox": [ + 317, + 81, + 558, + 189 + ], + "lines": [ + { + "bbox": [ + 317, + 81, + 558, + 189 + ], + "spans": [ + { + "bbox": [ + 317, + 81, + 558, + 189 + ], + "type": "image", + "image_path": "f6db93766cb5f466189876315472945b0af1547224588b7f6baaa83e5488d4b6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 198, + 559, + 220 + ], + "lines": [ + { + "bbox": [ + 314, + 198, + 559, + 220 + ], + "spans": [ + { + "bbox": [ + 314, + 198, + 559, + 220 + ], + "type": "text", + "content": "Figure 3: Zero-shot performance on five distinct assessment dimensions with different input resolution and fps." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 249, + 559, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 249, + 559, + 282 + ], + "spans": [ + { + "bbox": [ + 313, + 249, + 559, + 282 + ], + "type": "text", + "content": "rate (fps), and evaluation granularity, we reveal the capabilities of the base model to handle different dimensions, and further demonstrate the crucial importance of integrating entity-level evaluation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 282, + 559, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 282, + 559, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 282, + 559, + 380 + ], + "type": "text", + "content": "Increasing resolution and fps leads to slight improvements. Fig. 3 illustrates the accuracy across five dimensions when prompted with entity-level questions. We can see that the accuracy curves show slight improvements with increasing resolutions or frame rates (fps), albeit at a significant computational cost. These results suggest that resolution and fps are not the primary factors of performance enhancement. Consequently, for efficiency we adopt " + }, + { + "bbox": [ + 313, + 282, + 559, + 380 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 313, + 282, + 559, + 380 + ], + "type": "text", + "content": " pixels and 2 fps as the default settings for subsequent zero-shot and supervised fine-tuning (SFT) experiments." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "spans": [ + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "text", + "content": "Performance varies significantly across different dimensions. As shown in Fig. 3, the zero-shot accuracy for visual quality is exceptionally low at " + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "inline_equation", + "content": "26.1\\%" + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "text", + "content": ", while factual consistency achieves " + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "inline_equation", + "content": "57.6\\%" + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "text", + "content": ". 
In contrast, dimensions like text alignment show higher accuracy at " + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "inline_equation", + "content": "80.59\\%" + }, + { + "bbox": [ + 313, + 381, + 559, + 501 + ], + "type": "text", + "content": ", likely due to the base model's inherent capabilities from pre-training on caption data. We believe that the notably low accuracy in visual quality is primarily attributed to misalignment from AI-generated videos, and the main challenges still lie in dimensions requiring in-depth reasoning, such as factual consistency, temporal consistency, and text alignment, which will be further demonstrated in the following section." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 502, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 502, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 502, + 559, + 710 + ], + "type": "text", + "content": "Integrating entity-level evaluations brings a substantial performance gain. To validate the efficacy of our entity-level QA framework, we conduct experiments across three evaluation granularities: overall level, dimension level, and our proposed entity level, as detailed in Tab. 1. The overall level (1st row) prompts the model with an overall assessment rating from 1 to 4, accompanied by detailed evaluation criteria, while the dimension level (2nd row) prompts model to rate each dimension from 1 to 4, which are then averaged to get a final score. The results of our proposed entity-level (3rd and 4th rows) are reported with and without a probability calculation strategy introduced in Sec. 3.1, and furthermore, we instruct the model to provide explanatory reasoning along with answers (last two rows). Compared to the entity-level framework, both the overall and dimension levels exhibit substantial performance degradation across all dimensions, indicating that fine-grained evaluation substantially enhances the model's performance. It is worth noting that incorporating explanatory reasoning does not bring improvements, revealing the inherent limitations of the base model in understanding AI-generated videos." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 105, + 556, + 198 + ], + "blocks": [ + { + "bbox": [ + 119, + 83, + 490, + 94 + ], + "lines": [ + { + "bbox": [ + 119, + 83, + 490, + 94 + ], + "spans": [ + { + "bbox": [ + 119, + 83, + 490, + 94 + ], + "type": "text", + "content": "Table 1: Correlation between model Zero-shot answer and human reference on FingER-test" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 105, + 556, + 198 + ], + "lines": [ + { + "bbox": [ + 53, + 105, + 556, + 198 + ], + "spans": [ + { + "bbox": [ + 53, + 105, + 556, + 198 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Visual Quality</td><td>Temporal</td><td>Dynamic Degree</td><td>Text Alignment</td><td>Factual</td><td>Overall</td></tr>
<tr><td>Qwen2.5-VL</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td></tr>
<tr><td>Overall Level</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-/30.68/29.27</td></tr>
<tr><td>Dimension Level</td><td>-/35.06/35.54</td><td>-/16.05/17.06</td><td>-/14.81/14.09</td><td>-/33.68/32.62</td><td>-/13.86/12.28</td><td>-/52.32/61.14</td></tr>
<tr><td>Entity (w/o prob)</td><td>25.33/1.85/5.22</td><td>78.72/83.26/83.91</td><td>72.87/51.04/48.98</td><td>81.6/70.68/73.44</td><td>58.34/51.03/53.27</td><td>66.50/80.86/83.71</td></tr>
<tr><td>Entity (w/ prob)</td><td>25.33/40.60/40.94</td><td>78.72/84.51/85.44</td><td>72.87/56.48/56.85</td><td>81.6/74.09/76.49</td><td>58.34/57.45/58.67</td><td>66.50/81.23/85.26</td></tr>
<tr><td>+Reason (w/o prob)</td><td>45.71/49.97/49.61</td><td>77.65/83.12/83.89</td><td>75.21/54.30/52.87</td><td>81.08/73.24/75.31</td><td>40.51/17.43/23.55</td><td>63.96/73.40/79.15</td></tr>
<tr><td>+Reason (w/ prob)</td><td>45.71/46.29/49.64</td><td>77.65/84.60/83.89</td><td>75.21/48.88/52.80</td><td>81.08/72.38/75.35</td><td>40.51/29.27/23.50</td><td>63.96/73.29/79.18</td></tr></table>
", + "image_path": "285e12cf77b75a7f0f08e2efaecc08f1bfa175f1b9a591e9fcbf7e2221583f34.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 53, + 232, + 556, + 375 + ], + "blocks": [ + { + "bbox": [ + 63, + 209, + 545, + 220 + ], + "lines": [ + { + "bbox": [ + 63, + 209, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 63, + 209, + 545, + 220 + ], + "type": "text", + "content": "Table 2: Correlation between SFT/RL model answer and human reference on FingER-test (Z-GRPO means Zero-GRPO)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 232, + 556, + 375 + ], + "lines": [ + { + "bbox": [ + 53, + 232, + 556, + 375 + ], + "spans": [ + { + "bbox": [ + 53, + 232, + 556, + 375 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Visual Quality</td><td>Temporal</td><td>Dynamic Degree</td><td>Text Alignment</td><td>Factual</td><td>Overall</td></tr>
<tr><td></td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td><td>Acc/SRCC/PLCC</td></tr>
<tr><td>GPT-4o [13]</td><td>62.19/56.24/57.93</td><td>77.83/78.64/79.13</td><td>68.31/54.14/57.02</td><td>83.41/72.20/74.33</td><td>58.77/48.93/49.51</td><td>69.92/81.25/82.36</td></tr>
<tr><td>VideoScore [11]</td><td>-/22.80/18.55</td><td>-/23.84/26.06</td><td>-/9.49/7.18</td><td>-/19.18/13.87</td><td>-/22.93/18.31</td><td>-/20.39/17.68</td></tr>
<tr><td>Qwen2.5-VL [1]</td><td>25.33/40.60/40.94</td><td>78.72/84.51/85.44</td><td>72.87/56.48/56.85</td><td>81.6/74.09/76.49</td><td>58.34/57.45/58.67</td><td>66.50/81.23/85.26</td></tr>
<tr><td>Z-GRPO (w/o prob)</td><td>76.01/73.39/70.46</td><td>78.01/83.13/83.82</td><td>77.93/69.74/68.47</td><td>84.46/73.80/75.99</td><td>55.21/47.47/50.33</td><td>74.51/83.46/86.56</td></tr>
<tr><td>Z-GRPO (w/ prob)</td><td>76.01/71.83/71.97</td><td>78.01/81.81/83.86</td><td>77.93/67.49/68.51</td><td>84.46/74.38/76.28</td><td>55.21/42.21/50.15</td><td>74.51/83.24/86.82</td></tr>
<tr><td>FingER (w/o prob)</td><td>83.78/83.48/82.53</td><td>83.33/83.13/83.70</td><td>83.23/71.37/67.95</td><td>82.77/70.94/73.75</td><td>72.89/64.12/64.61</td><td>81.25/88.87/89.67</td></tr>
<tr><td>FingER (w/ prob)</td><td>83.78/85.31/85.22</td><td>83.33/86.24/86.99</td><td>83.23/77.07/74.73</td><td>82.77/73.85/77.98</td><td>72.89/70.99/69.26</td><td>81.25/90.23/91.41</td></tr>
<tr><td>+Reason (w/o prob)</td><td>84.05/81.51/81.00</td><td>84.04/85.88/86.63</td><td>82.49/69.22/68.22</td><td>86.79/77.87/79.77</td><td>74.03/67.47/68.41</td><td>82.33/89.79/91.64</td></tr>
<tr><td>+Reason (w/ prob)</td><td>84.05/83.85/83.87</td><td>84.04/86.51/87.09</td><td>82.49/76.11/76.70</td><td>86.79/79.34/83.16</td><td>74.03/71.70/70.27</td><td>82.33/90.31/92.04</td></tr>
<tr><td>+GRPO (w/o prob)</td><td>82.30/80.62/78.09</td><td>82.98/85.08/85.57</td><td>81.63/65.54/64.92</td><td>85.88/75.74/77.91</td><td>74.04/68.65/70.73</td><td>81.41/89.26/91.25</td></tr>
<tr><td>+GRPO (w/ prob)</td><td>82.30/83.76/83.51</td><td>82.98/86.64/87.43</td><td>81.63/75.05/74.68</td><td>85.88/78.32/82.63</td><td>74.04/71.87/72.03</td><td>81.41/90.43/92.41</td></tr></table>
", + "image_path": "d9b02a1b058829ef36e4a6b35714adc0c10256dc2c1d78a1cda461ad9c1d4ccf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 53, + 411, + 291, + 548 + ], + "blocks": [ + { + "bbox": [ + 51, + 389, + 294, + 399 + ], + "lines": [ + { + "bbox": [ + 51, + 389, + 294, + 399 + ], + "spans": [ + { + "bbox": [ + 51, + 389, + 294, + 399 + ], + "type": "text", + "content": "Table 3: Zero-shot Evaluation Results on Public Benchmarks" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 411, + 291, + 548 + ], + "lines": [ + { + "bbox": [ + 53, + 411, + 291, + 548 + ], + "spans": [ + { + "bbox": [ + 53, + 411, + 291, + 548 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td colspan="2">GenAI-Bench[17]</td><td colspan="2">MonetBench[37]</td></tr>
<tr><td></td><td>SRCC</td><td>PLCC</td><td>tau</td><td>diff</td></tr>
<tr><td>GPT-4o[13]</td><td>35.79</td><td>36.61</td><td>45.70</td><td>48.30</td></tr>
<tr><td>Qwen2.5-VL[1]</td><td>46.62</td><td>44.29</td><td>46.70</td><td>44.27</td></tr>
<tr><td>VideoScore[11]</td><td>42.22</td><td>40.62</td><td>49.10</td><td>54.90</td></tr>
<tr><td>VQAScore[19]</td><td>52.70</td><td>50.60</td><td>56.10</td><td>59.50</td></tr>
<tr><td>Zero-GRPO</td><td>49.58</td><td>44.39</td><td>51.30</td><td>51.34</td></tr>
<tr><td>FingER</td><td>54.13</td><td>52.60</td><td>53.90</td><td>57.31</td></tr>
<tr><td>+ Reason</td><td>56.68</td><td>57.25</td><td>57.80</td><td>62.07</td></tr>
<tr><td>+ GRPO</td><td>57.03</td><td>56.59</td><td>58.00</td><td>62.80</td></tr></table>
", + "image_path": "c31a7b2de430267fedcf4b392ea6c547efde7a16d17b2e3d84f8d6eb0d7393c5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 574, + 279, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 574, + 279, + 586 + ], + "spans": [ + { + "bbox": [ + 50, + 574, + 279, + 586 + ], + "type": "text", + "content": "4.4 SFT and RL Performance on FingER-test" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 588, + 294, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 588, + 294, + 666 + ], + "spans": [ + { + "bbox": [ + 50, + 588, + 294, + 666 + ], + "type": "text", + "content": "In this section, we report the performance of our reasoning model on FingER-test using different training protocols including SFT with answers, SFT with reasons, zero GRPO, and GRPO with a cold start, we also provide results using the closed-source model GPT-40 and VideScore [11] for comparisons, as detailed in Tab. 2. Note that all these results, except for VideoScore [11], are obtained by entity-level evaluations for fair comparisons." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 666, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 666, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 666, + 295, + 710 + ], + "type": "text", + "content": "Our model, trained with only answers, demonstrates significant performance improvements over the base model, achieving overall gains of 14.75/9.00/6.15 in Acc/SRCC/PLCC, respectively. Substantial improvements are observed in the dimensions of visual quality," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 392, + 559, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 559, + 424 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 559, + 424 + ], + "type": "text", + "content": "dynamic degree, and factual consistency. Note that the improvement in the text alignment dimension is limited, mainly due to its inherent capabilities derived from pre-training data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 424, + 559, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 424, + 559, + 501 + ], + "spans": [ + { + "bbox": [ + 313, + 424, + 559, + 501 + ], + "type": "text", + "content": "Incorporating additional reasoning during training further boosts the performance, particularly in the dimensions of text alignment, factual consistency, and temporal consistency. For the text alignment dimension, the SFT with reasoning harvests performance gains with 4.02/5.49/5.18 in Acc/SRCC/PLCC. These improvements underscore the importance of in-depth video understanding to achieve higher performance in these dimensions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 501, + 559, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 559, + 686 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 559, + 686 + ], + "type": "text", + "content": "We further investigate the reasoning training using RL, which includes two kinds of training procedures: (1) Zero-GRPO, and (2) GRPO initialized with a cold-start from reasoning SFT training. The results presented in Table 2 reveal that Zero-GRPO fails to predict correct answers. Upon closer examination of the training process, we identified that the issue stems from the reasoning component. 
Zero-GRPO generates reasons that resemble captions rather than logical reasoning. In contrast, when GRPO is applied with a cold-start initialization from our reasoning SFT model, it is able to surpass the SFT model with only 1k additional training steps. Among these dimensions, we observed steady performance improvements in the temporal and factual consistency dimensions, with boosts of " + }, + { + "bbox": [ + 313, + 501, + 559, + 686 + ], + "type": "inline_equation", + "content": "1.15 / 0.88 / 2.77" + }, + { + "bbox": [ + 313, + 501, + 559, + 686 + ], + "type": "text", + "content": " in factual consistency. We believe that the reasoning cold-start teaches the model to reason in a rough manner, while GRPO guides it towards adopting reasons with correct answers, thereby incentivizing the reasoning capability in the model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 687, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 687, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 687, + 559, + 709 + ], + "type": "text", + "content": "Moreover, we evaluate the performance on our proposed FingER-test dataset with closed-source MLLM [13] (1st row), and VideoScore" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 81, + 426, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 81, + 426, + 89 + ], + "spans": [ + { + "bbox": [ + 56, + 81, + 426, + 89 + ], + "type": "text", + "content": "Text Prompt: The camera follows a person standing alone by the lake, gazing at the distant sunset, with their reflection mirrored on the water's surface." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 89, + 558, + 439 + ], + "blocks": [ + { + "bbox": [ + 53, + 89, + 558, + 439 + ], + "lines": [ + { + "bbox": [ + 53, + 89, + 558, + 439 + ], + "spans": [ + { + "bbox": [ + 53, + 89, + 558, + 439 + ], + "type": "image", + "image_path": "54976c59ce4846e46e0766e44bd28c157ead9e397c4980a5824be6ab946e240b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 450, + 499, + 462 + ], + "lines": [ + { + "bbox": [ + 111, + 450, + 499, + 462 + ], + "spans": [ + { + "bbox": [ + 111, + 450, + 499, + 462 + ], + "type": "text", + "content": "Figure 4: Qualitative results. We show several reasoning results outputted by our GRPO model." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 479, + 295, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 479, + 295, + 501 + ], + "spans": [ + { + "bbox": [ + 50, + 479, + 295, + 501 + ], + "type": "text", + "content": "[11] (2nd row), our proposed FingER outperforms those methods with a large margin across all five assessment dimensions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 530, + 255, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 530, + 255, + 544 + ], + "spans": [ + { + "bbox": [ + 50, + 530, + 255, + 544 + ], + "type": "text", + "content": "4.5 Comparison on Public Benchmarks" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "type": "text", + "content": "Tab. 
3 demonstrates the consistent improvements achieved by our method on two public benchmarks. We compare our methods with GPT-40, Qwen2.5-VL and two other approaches. Specifically, with only Yes/No answer prediction, we already outperform all methods on GenAI-Bench, indicating the effectiveness of our fine-grained evaluation framework. Training with reasons and GRPO with a cold-start leads to further improvements with a final " + }, + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "type": "inline_equation", + "content": "8.21\\% / 11.83\\%" + }, + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "type": "text", + "content": " SRCC/PLCC relative performance boost. On MonetBench, without any weight fitting, we just average scores of five dimensions, our method is able to achieve " + }, + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "type": "inline_equation", + "content": "3.39\\% / 5.55\\%" + }, + { + "bbox": [ + 50, + 545, + 295, + 710 + ], + "type": "text", + "content": " relative improvements of tau/diff. It is worth noting that VideoScore [11] is trained using 37.6k training videos, while VQAScore [19] utilizes 665k samples, we outperform these methods with only 3.3k training videos without additional training samples from other sources, which is at most one-tenth of the training size adopted by other methods." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 315, + 478, + 391, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 478, + 391, + 488 + ], + "spans": [ + { + "bbox": [ + 315, + 478, + 391, + 488 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 492, + 561, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 492, + 561, + 679 + ], + "spans": [ + { + "bbox": [ + 313, + 492, + 561, + 679 + ], + "type": "text", + "content": "In this paper, we emphasize the critical importance of integrating fine-grained reasoning into AI-generated video quality assessment, and we propose FingER, an entity-level fine-grained quality assessment framework with five distinct evaluation dimensions for AI-generated videos. To bridge the gap between non-AI videos and AI-generated videos, we construct a high-quality dataset, FingER-Instruct-60k, which consists of 3.3k videos generated by modern T2V models and 60k entity-level question / answering / reasoning pairs. Based on this dataset, we explore multiple training protocols to best incentivize the model's reasoning capability, including reason SFT, zero GRPO and GRPO with a reasoning cold-start. Extensive experiments demonstrate that by utilizing GRPO training with a cold-start, our method not only achieves the best performance on our dataset, but also outperforms other methods and closed-source models on two public benchmarks. And it is worth noting that we achieve SOTA performance with only 3.3k training samples." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 97, + 295, + 695 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 56, + 97, + 294, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 97, + 294, + 121 + ], + "spans": [ + { + "bbox": [ + 56, + 97, + 294, + 121 + ], + "type": "text", + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923 (2025)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 121, + 295, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 295, + 152 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 295, + 152 + ], + "type": "text", + "content": "[2] Fan Bao, Chendong Xiang, Gang Yue, Guande He, Hongzhou Zhu, Kaiwen Zheng, Min Zhao, Shilong Liu, Yaole Wang, and Jun Zhu. 2024. Vudu: a highly consistent, dynamic and skilled text-to-video generator with diffusion models. arXiv preprint arXiv:2405.04233 (2024)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 153, + 294, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 153, + 294, + 192 + ], + "spans": [ + { + "bbox": [ + 56, + 153, + 294, + 192 + ], + "type": "text", + "content": "[3] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. 2024. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 17682-17690." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 194, + 295, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 194, + 295, + 224 + ], + "spans": [ + { + "bbox": [ + 56, + 194, + 295, + 224 + ], + "type": "text", + "content": "[4] Tim Brooks, Bill Peebles, Connor Holmes, Will DePue, Yufei Guo, Li Jing, David Schnurr, Joe Taylor, Troy Luhman, Eric Luhman, et al. 2024. Video generation models as world simulators. 2024. URL https://openai.com/research/video-generation-models-as-world-simulators-3 (2024), 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 225, + 294, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 225, + 294, + 263 + ], + "spans": [ + { + "bbox": [ + 56, + 225, + 294, + 263 + ], + "type": "text", + "content": "[5] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 24185-24198." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 265, + 294, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 265, + 294, + 297 + ], + "spans": [ + { + "bbox": [ + 56, + 265, + 294, + 297 + ], + "type": "text", + "content": "[6] Jaemin Cho, Yushi Hu, Jason M Baldridge, Roopal Garg, Peter Anderson, Ranjay Krishna, Mohit Bansal, Jordi Pont-Tuset, and Su Wang. 2024. Davidsonian Scene Graph: Improving Reliability in Fine-grained Evaluation for Text-to-Image Generation. In ICLR." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 297, + 294, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 297, + 294, + 320 + ], + "spans": [ + { + "bbox": [ + 56, + 297, + 294, + 320 + ], + "type": "text", + "content": "[7] Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning. arXiv preprint arXiv:2504.02546 (2025)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 321, + 294, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 321, + 294, + 352 + ], + "spans": [ + { + "bbox": [ + 56, + 321, + 294, + 352 + ], + "type": "text", + "content": "[8] Xiangxiang Chu, Limeng Qiao, Xinyu Zhang, Shuang Xu, Fei Wei, Yang Yang, Xiaofei Sun, Yiming Hu, Xinyang Lin, Bo Zhang, et al. 2024. MobilevIm v2: Faster and stronger baseline for vision language model. arXiv preprint arXiv:2402.03766 (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 353, + 294, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 353, + 294, + 384 + ], + "spans": [ + { + "bbox": [ + 56, + 353, + 294, + 384 + ], + "type": "text", + "content": "[9] Daniel Deutsch, George Foster, and Markus Freitag. 2023. Ties Matter: Meta-Evaluating Modern Metrics with Pairwise Accuracy and Tie Calibration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 12914-12929." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 384, + 294, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 384, + 294, + 416 + ], + "spans": [ + { + "bbox": [ + 53, + 384, + 294, + 416 + ], + "type": "text", + "content": "[10] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 417, + 294, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 417, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 417, + 294, + 456 + ], + "type": "text", + "content": "[11] Xuan He, Dongfu Jiang, Ge Zhang, Max Ku, Achint Soni, Sherman Siu, Haonan Chen, Abhranil Chandra, Ziyan Jiang, Aaran Arulraj, et al. 2024. VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 2105-2123." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 456, + 294, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 456, + 294, + 488 + ], + "spans": [ + { + "bbox": [ + 53, + 456, + 294, + 488 + ], + "type": "text", + "content": "[12] Hailang Huang, Yong Wang, Zixuan Huang, Huaqiu Li, Tongwen Huang, Xi-angxiang Chu, and Richong Zhang. 2024. MMGenBench: Evaluating the Limits of LMMs from the Text-to-Image Generation Perspective. arXiv preprint arXiv:2411.14062 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 488, + 294, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 488, + 294, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 488, + 294, + 512 + ], + "type": "text", + "content": "[13] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. GPT-4o System Card. arXiv preprint arXiv:2410.21276 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 512, + 294, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 512, + 294, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 512, + 294, + 544 + ], + "type": "text", + "content": "[14] Haoning Wu Xintao Wang Yixiao Ge Xiaodong Cun David Junhao Zhang Jia-Wei Liu Yuchao Gu Rui Zhao Weisi Lin Wynne Hsu Ying Shan Jay Zhangjie Wu, Guian Fang and Mike Zheng Shou. 2024. Towards A Better Metric for Text-to-Video Generation. arXiv:2401.07781 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 544, + 220, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 544, + 220, + 552 + ], + "spans": [ + { + "bbox": [ + 53, + 544, + 220, + 552 + ], + "type": "text", + "content": "[15] Solomon Kullback. 1951. Kullback-leibler divergence." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 552, + 294, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 552, + 294, + 575 + ], + "spans": [ + { + "bbox": [ + 53, + 552, + 294, + 575 + ], + "type": "text", + "content": "[16] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. 2024. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629 (2024)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 576, + 294, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 576, + 294, + 608 + ], + "spans": [ + { + "bbox": [ + 53, + 576, + 294, + 608 + ], + "type": "text", + "content": "[17] Baiqi Li, Zhiqiu Lin, Deepak Pathak, Jiayao Emily Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. GenAI-bench: A holistic benchmark for compositional text-to-visual generation. In Synthetic Data for Computer Vision Workshop@ CVPR 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 609, + 294, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 609, + 294, + 632 + ], + "spans": [ + { + "bbox": [ + 53, + 609, + 294, + 632 + ], + "type": "text", + "content": "[18] Mingxing Li, Rui Wang, Lei Sun, Yancheng Bai, and Xiangxiang Chu. 2025. Next Token Is Enough: Realistic Image Quality and Aesthetic Scoring with Multimodal Large Language Model. arXiv preprint arXiv:2503.06141 (2025)." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 632, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 632, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 53, + 632, + 294, + 663 + ], + "type": "text", + "content": "[19] Zhiqiu Lin, Deepak Pathak, Baiqi Li, Jiayao Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. Evaluating text-to-visual generation with image-to-text generation. In European Conference on Computer Vision. Springer, 366–384." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 664, + 294, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 664, + 294, + 695 + ], + "spans": [ + { + "bbox": [ + 53, + 664, + 294, + 695 + ], + "type": "text", + "content": "[20] Xinrang Ling, Chen Zhu, Meiqi Wu, Hangyu Li, Xiaokun Feng, Cundian Yang, Aiming Hao, Jiashu Zhu, Jiahong Wu, and Xiangxiang Chu. 2025. VMBench: A Benchmark for Perception-Aligned Video Motion Generation. arXiv preprint arXiv:2503.10076 (2025)." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 86, + 559, + 700 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 317, + 86, + 559, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 86, + 559, + 110 + ], + "spans": [ + { + "bbox": [ + 317, + 86, + 559, + 110 + ], + "type": "text", + "content": "[21] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783 (2025)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 111, + 559, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 111, + 559, + 134 + ], + "spans": [ + { + "bbox": [ + 317, + 111, + 559, + 134 + ], + "type": "text", + "content": "[22] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 135, + 559, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 135, + 559, + 166 + ], + "spans": [ + { + "bbox": [ + 317, + 135, + 559, + 166 + ], + "type": "text", + "content": "[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseemath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 167, + 522, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 167, + 522, + 175 + ], + "spans": [ + { + "bbox": [ + 317, + 167, + 522, + 175 + ], + "type": "text", + "content": "[24] Genmo Team. 2024. Mochi 1. https://github.com/genmoai/models." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 175, + 559, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 175, + 559, + 206 + ], + "spans": [ + { + "bbox": [ + 317, + 175, + 559, + 206 + ], + "type": "text", + "content": "[25] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. 
arXiv preprint arXiv:2403.05530 (2024)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 207, + 559, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 207, + 559, + 230 + ], + "spans": [ + { + "bbox": [ + 317, + 207, + 559, + 230 + ], + "type": "text", + "content": "[26] Zachary Teed and Jia Deng, 2020. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. Springer, 402-419." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 231, + 559, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 231, + 559, + 261 + ], + "spans": [ + { + "bbox": [ + 317, + 231, + 559, + 261 + ], + "type": "text", + "content": "[27] Zicheng Zhang Chunyi Li Haoning Wu Xiongkuo Min Guangtao Zhai Tengchuan Kou, Xiaohong Liu and Ning Liu. 2024. Subjective-aligned dataset and metric for text-to-video quality assessment. arXiv preprint arXiv:2403.11956 (2024)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 262, + 559, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 262, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 317, + 262, + 559, + 285 + ], + "type": "text", + "content": "[28] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2016. Improved techniques for training gans. Advances in neural information processing systems, 29 (2016)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 286, + 559, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 286, + 559, + 317 + ], + "spans": [ + { + "bbox": [ + 317, + 286, + 559, + 317 + ], + "type": "text", + "content": "[29] Wojciech Zaremba Vicki Cheung Alec Radford Tim Salimans, Ian Goodfellow and Xi Chen. 2021. Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. arXiv preprint arXiv:2104.14806 (2021)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 318, + 559, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 318, + 559, + 341 + ], + "spans": [ + { + "bbox": [ + 317, + 318, + 559, + 341 + ], + "type": "text", + "content": "[30] Thomas Unterthiner, Sjoerd Van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. 2019. FVD: A new metric for video generation. ICLR 2019 Workshop DeepGenStruct (2019)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 342, + 559, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 342, + 559, + 373 + ], + "spans": [ + { + "bbox": [ + 317, + 342, + 559, + 373 + ], + "type": "text", + "content": "[31] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Galloudec. 2020. TRL: Transformer Reinforcement Learning. https://github.com/huggingface/trl." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 374, + 559, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 374, + 559, + 405 + ], + "spans": [ + { + "bbox": [ + 317, + 374, + 559, + 405 + ], + "type": "text", + "content": "[32] Bram Wallace, Meihua Dang, Rafael Rafailov, Linqi Zhou, Aaron Lou, Senthil Purushwalkam, Stefano Ermon, Caiming Xiong, Shafiq Joty, and Nikhil Naik. 2024. Diffusion model alignment using direct preference optimization. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 8228-8238." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 406, + 559, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 406, + 559, + 429 + ], + "spans": [ + { + "bbox": [ + 317, + 406, + 559, + 429 + ], + "type": "text", + "content": "[33] Yibin Wang, Zhiyu Tan, Junyan Wang, Xiaomeng Yang, Cheng Jin, and Hao Li. 2024. Lift: Leveraging human feedback for text-to-video model alignment. arXiv preprint arXiv:2412.04814 (2024)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 430, + 559, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 430, + 559, + 453 + ], + "spans": [ + { + "bbox": [ + 317, + 430, + 559, + 453 + ], + "type": "text", + "content": "[34] Yibin Wang, Yuhang Zang, Hao Li, Cheng Jin, and Jiaqi Wang. 2025. Unified Reward Model for Multimodal Understanding and Generation. arXiv preprint arXiv:2503.05236 (2025)." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 453, + 559, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 453, + 559, + 485 + ], + "spans": [ + { + "bbox": [ + 317, + 453, + 559, + 485 + ], + "type": "text", + "content": "[35] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824-24837." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 486, + 559, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 486, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 317, + 486, + 559, + 517 + ], + "type": "text", + "content": "[36] Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Liang Liao, Chunyi Li, Yixuan Gao, Annan Wang, Erli Zhang, Wenxiu Sun, et al. 2024. Q-Align: Teaching LMMs for Visual Scoring via Discrete Text-Defined Levels. In International Conference on Machine Learning. PMLR, 54015-54029." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 517, + 559, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 517, + 559, + 548 + ], + "spans": [ + { + "bbox": [ + 317, + 517, + 559, + 548 + ], + "type": "text", + "content": "[37] Jiazheng Xu, Yu Huang, Jiale Cheng, Yuanming Yang, Jiajun Xu, Yuan Wang, Wenbo Duan, Shen Yang, Qunlin Jin, Shurun Li, et al. 2024. Visionreward: Fine-grained multi-dimensional human preference learning for image and video generation. arXiv preprint arXiv:2412.21059 (2024)." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 549, + 559, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 549, + 559, + 581 + ], + "spans": [ + { + "bbox": [ + 317, + 549, + 559, + 581 + ], + "type": "text", + "content": "[38] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024)." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 582, + 559, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 582, + 559, + 612 + ], + "spans": [ + { + "bbox": [ + 317, + 582, + 559, + 612 + ], + "type": "text", + "content": "[39] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. Advances in neural information processing systems 36 (2023), 11809-11822." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 613, + 559, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 613, + 559, + 636 + ], + "spans": [ + { + "bbox": [ + 317, + 613, + 559, + 636 + ], + "type": "text", + "content": "[40] Xuebo Liu XintaoWang Yong Zhang Haoxin Chen Yang Liu Tieyong Zeng Raymond Chan Yaofang Liu, Xiaodong Cun and Ying Shan. 2024. Evalcrafter: Benchmarking and evaluating large video generation models. (2024)." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 637, + 559, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 637, + 559, + 652 + ], + "spans": [ + { + "bbox": [ + 317, + 637, + 559, + 652 + ], + "type": "text", + "content": "[41] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. LIMO: Less is More for Reasoning. arXiv preprint arXiv:2502.03387 (2025)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 654, + 559, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 654, + 559, + 676 + ], + "spans": [ + { + "bbox": [ + 317, + 654, + 559, + 676 + ], + "type": "text", + "content": "[42] Ailing Zeng, Yuhang Yang, Weidong Chen, and Wei Liu. 2024. The Dawn of Video Generation: Preliminary Explorations with SORA-like Models. arXiv preprint arXiv:2410.05227 (2024)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 677, + 559, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 677, + 559, + 700 + ], + "spans": [ + { + "bbox": [ + 317, + 677, + 559, + 700 + ], + "type": "text", + "content": "[43] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. 2024. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 294, + 152 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 52, + 86, + 294, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 86, + 294, + 134 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 294, + 134 + ], + "type": "text", + "content": "[44] Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. LlamaFactory: Unified Efficient Fine-Tuning of " + }, + { + "bbox": [ + 52, + 86, + 294, + 134 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 52, + 86, + 294, + 134 + ], + "type": "text", + "content": " Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations). Association for Computational Linguistics, Bangkok, Thailand. 
http://arxiv.org/abs/2403.13372" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 134, + 294, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 134, + 294, + 152 + ], + "spans": [ + { + "bbox": [ + 52, + 134, + 294, + 152 + ], + "type": "text", + "content": "[45] Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. 2024. Open-sora: Democratizing" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 86, + 558, + 152 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 331, + 86, + 538, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 86, + 538, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 86, + 538, + 95 + ], + "type": "text", + "content": "efficient video production for all. arXiv preprint arXiv:2412.20404 (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 317, + 95, + 558, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 558, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 558, + 118 + ], + "type": "text", + "content": "[46] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. 2025. R1-Zero's \"Aha Moment\" in Visual Reasoning on a 2B Non-SFT Model. arXiv preprint arXiv:2503.05132 (2025)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 118, + 558, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 558, + 152 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 558, + 152 + ], + "type": "text", + "content": "[47] Jiashuo Yu Fan Zhang Chenyang Si Yuming Jiang Yuanhan Zhang Tianxing Wu Qingyang Jin Nattapol Chanpaisit Yaohui Wang Xinyuan Chen Limin Wang Dahua Lin Yu Qiao Ziqi Huang, Yinan He and Ziwei Liu. 2023. Vbench: Comprehensive benchmark suite for video generative models. (2023)." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_content_list.json b/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6245bf34afe5be257da1e6216339508ae877edd6 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_content_list.json @@ -0,0 +1,838 @@ +[ + { + "type": "text", + "text": "Data Augmentation Through Random Style Replacement", + "text_level": 1, + "bbox": [ + 147, + 71, + 852, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$1^{\\mathrm{st}}$ Qikai Yang\\*", + "bbox": [ + 220, + 161, + 316, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Illinois Urbana-Champaign, Urbana, USA", + "bbox": [ + 98, + 176, + 439, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Corresponding Author: qikaiy2@illinois.edu", + "bbox": [ + 132, + 188, + 405, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$3^{\\text{th}}$ Huaiying Luo", + "bbox": [ + 214, + 214, + 321, + 228 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cornell University, Ithaca, USA", + "bbox": [ + 169, + 228, + 366, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$2^{\\mathrm{nd}}$ Cheng Ji", + "bbox": [ + 683, + 161, + 763, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Illinois Urbana-Champaign, Urbana, USA", + "bbox": [ + 553, + 176, + 893, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$4^{\\mathrm{th}}$ Panfeng Li", + "bbox": [ + 676, + 214, + 767, + 228 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Michigan, Ann Arbor, USA", + "bbox": [ + 596, + 228, + 849, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$5^{\\mathrm{th}}$ Zhicheng Ding", + "bbox": [ + 437, + 253, + 553, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Columbia University, New York, USA", + "bbox": [ + 379, + 267, + 612, + 279 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract-In this paper, we introduce a novel data augmentation technique that combines the advantages of style augmentation and random erasing by selectively replacing image subregions with style-transferred patches. Our approach first applies a random style transfer to training images, then randomly substitutes selected areas of these images with patches derived from the style-transferred versions. This method is able to seamlessly accommodate a wide range of existing style transfer algorithms and can be readily integrated into diverse data augmentation pipelines. By incorporating our strategy, the training process becomes more robust and less prone to overfitting. Comparative experiments demonstrate that, relative to previous style augmentation methods, our technique achieves superior performance and faster convergence.", + "bbox": [ + 73, + 313, + 491, + 489 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Data Augmentation, Style Transfer, Style Augmentation", + "bbox": [ + 73, + 489, + 490, + 516 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. 
INTRODUCTION", + "text_level": 1, + "bbox": [ + 217, + 532, + 349, + 545 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in deep learning have driven significant progress in a wide range of computer vision tasks, including image classification, object detection, and semantic segmentation [1, 2]. Despite these advances, many of these tasks continue to face a fundamental bottleneck: a lack of sufficient labeled data [3, 4]. Annotating large-scale datasets is both time-consuming and costly, which can limit the applicability of deep neural networks in specialized or rapidly evolving domains. To mitigate this issue, data augmentation techniques are heavily utilized, artificially expanding and diversifying the training set so that models generalize more effectively.", + "bbox": [ + 73, + 553, + 490, + 718 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Data augmentation [5] has garnered significant attention in supervised learning research across a wide range of domains—including computer vision [6, 7], natural language processing [8], graph learning [9, 10], and large language model [11, 12] — due to its ability to increase both the volume and diversity of training data, thereby enhancing model generalization and mitigating overfitting. Broadly, data augmentation strategies can be grouped into two categories: generative methods, which utilize models like Variational Autoencoders (VAEs) [13], Generative Adversarial Networks (GANs) [14], Large Language Models (LLMs), or diffusion-based frameworks [15] to synthesize new data; and traditional methods, which rely on transformations such as random cropping, flip", + "bbox": [ + 73, + 720, + 491, + 917 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5c95982de21f32060fcf87daeb2846c2993baf8fe7dc6d6dbed286d6a6f7d61c.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 506, + 306, + 588, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c1f4fc762f4606a8c1f77f5930372624fb1fef7cea8ef144d64d24a0642d444f.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 589, + 308, + 669, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/395de3fb724960ffe2d001e45b4c0adb878d66ece621d43806a6d2e1bf672551.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 673, + 308, + 754, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/12af2ef443c3e140f0e94ec1a27d1d3fb16f508cdfa26628f5af80f4259f59f2.jpg", + "image_caption": [ + "(d)", + "Fig. 1: Examples of random style transfer: we generate a style-transferred image and use it to patch original image in different ways (a) Input Image. (b) Random-Region-Erased Image. (c) Style Transfer. (d) Random Style Replacement. (e) Random Style Replacement." + ], + "image_footnote": [], + "bbox": [ + 756, + 308, + 836, + 454 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9d8bb25c9129de4886dd493710bdc1ad34676fd2a94cf035d3c30cea079c87ff.jpg", + "image_caption": [ + "(e)" + ], + "image_footnote": [], + "bbox": [ + 841, + 308, + 919, + 454 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ping, rotations, color jittering, and histogram equalization to modify existing samples. While both approaches aim to expose the model to a wider variety of conditions, thus reducing overfitting, traditional augmentation strategies may not fully capture the complexity of real-world variability. 
Consequently, several studies have explored more refined methods, including style augmentation and random erasing [16-18]. Style augmentation employs style transfer [19] to alter the visual attributes of training images while preserving their semantic content, thereby increasing robustness to differences in texture, color, and contrast. Random erasing, on the other hand, randomly occludes or replaces subregions of an image, making models more resilient to missing or corrupted information. In this paper, we revisit these traditional approaches—particularly focusing on their potential to advance the efficacy of data augmentation in supervised learning.", + "bbox": [ + 501, + 595, + 921, + 837 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we introduce a novel data augmentation method that merges style augmentation with random erasing. Our approach involves applying a random style transfer to an image, followed by replacing specific subregions with patches from the style-transferred version. This technique enhances", + "bbox": [ + 501, + 840, + 921, + 917 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.10563v2 [cs.CV] 18 Jun 2025", + "bbox": [ + 22, + 279, + 57, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "robustness against style variations and occlusion-like effects. It integrates smoothly with existing style transfer frameworks and fits easily into standard data augmentation pipelines, as illustrated in Figure 1.", + "bbox": [ + 73, + 70, + 490, + 131 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main contributions of our work are as below:", + "bbox": [ + 89, + 132, + 431, + 146 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) We propose a technique that merges style augmentation and random erasing, offering benefits from both texture variation and structured occlusion.", + "2) We demonstrate through experiments that our approach reduces the risk of overfitting while achieving faster convergence compared to established style augmentation methods. Ease of Integration: Our strategy is parameter-free and can be readily adapted to a broad spectrum of computer vision tasks, making it a highly practical solution for data augmentation." + ], + "bbox": [ + 84, + 150, + 491, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By leveraging this new augmentation method, we observe notable gains in model performance across different tasks, highlighting its potential to address the persistent challenge of limited labeled data in computer vision research.", + "bbox": [ + 73, + 304, + 491, + 364 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. DATASET", + "text_level": 1, + "bbox": [ + 235, + 376, + 330, + 390 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We tested our random style replacement method on the STL-10 dataset, which includes 5,000 training images and 8,000 test images, each with a resolution of $96 \\times 96$ pixels across 10 classes [3]. We chose STL-10 due to its complex backgrounds and high resolution, which pose a substantial challenge for image classification, making it a robust benchmark. Additionally, the limited size of the training set highlights the effectiveness of our data augmentation technique in enhancing training data.", + "bbox": [ + 73, + 397, + 490, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "III. 
METHODS", + "text_level": 1, + "bbox": [ + 228, + 545, + 336, + 559 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This sections introduces our random style replacement method in details. We described the overall process of random style replacement and explain how we perform image patch.", + "bbox": [ + 73, + 566, + 490, + 613 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. Random Style Replacement", + "text_level": 1, + "bbox": [ + 73, + 625, + 285, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "During training, random style replacement is applied with a certain probability $p$ : for each image $I$ in a mini-batch, there's a probability $p$ that it undergoes style replacement and a probability $1 - p$ that it remains unchanged.", + "bbox": [ + 73, + 643, + 490, + 704 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "If selected, the image will be transformed into a new version with a partial style change. This random style replacement process consists of two steps: generating a complete-style-transferred image and merging it with the original image by certain patching methods. The procedure is shown in Alg. 1.", + "bbox": [ + 73, + 705, + 491, + 780 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Style transfer refers to a class of image processing algorithms that alter the visual style of an image while preserving its semantic content. For style transfer to be part of a data augmentation technique, it needs to be a both fast and random algorithm capable of applying a broad range of styles. Therefore, we adopt the approach of Jackson et al., which efficiently generates a completely style-transferred image by incorporating randomness on the fly without requiring heavy computations [16].", + "bbox": [ + 73, + 780, + 491, + 917 + ], + "page_idx": 1 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Random Style Replacement Procedure" + ], + "code_body": "Input: Input image $I$ Augmentation probability $p$ Patch mode pMode; Output: Augmented image $I^{*}$ Initialization: $p_1\\gets \\mathrm{Rand}(0,1)$ 1 if $p_1\\geq p$ then 2 $I^{*}\\gets I$ 3 return $I^{*}$ 4 else 5 $I^{\\prime}\\gets$ randomStyleTransfer(I); 6 $I^{*}\\gets$ randomPatch(I,I',pMode); 7 return $I^{*}$ 8 end", + "bbox": [ + 506, + 88, + 792, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The generated style-transferred image will then be used to patch the original image, creating an augmented image. There are multiple patching methods, and we adopt the two most common ones: patching by a random subregion and patching randomly selecting individual pixels. To avoid bias in data augmentation, we employed random style transferring to ensure diverse and uniform modifications across all image types, enhancing model generalization.", + "bbox": [ + 503, + 325, + 921, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "B. Random Patch", + "text_level": 1, + "bbox": [ + 504, + 459, + 629, + 472 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Random patch is to patch a image based on another image. Here, we provided a detailed explanation of random patch by subregion. This method copies a randomly selected region from the style-transferred image onto the original image. 
Specifically, it randomly selects a rectangle region $I_{e}$ within the image and overwrite all its pixels with those from the style-transferred image.", + "bbox": [ + 501, + 479, + 919, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Firstly we will determine the shape of the patching area $I_{e}$ . Assume the training image has dimensions $W \\times H$ and an area $S = W \\times H$ . We randomly initialize the area of the patched rectangle region to $S_{e}$ , where $\\frac{S_{e}}{S}$ falls within the range defined by the minimum $s_{l}$ and maximum $s_{h}$ . Similarly, the aspect ratio of the rectangle region, denoted as $r_{e}$ , is randomly chosen between $r_{l}$ and $r_{h}$ . Given those, the dimensions of $I_{e}$ are computed as $H_{e} = \\sqrt{S_{e} \\times r_{e}}$ and $W_{e} = \\sqrt{\\frac{S_{e}}{r_{e}}}$ .", + "bbox": [ + 503, + 585, + 919, + 712 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Next, we randomly select a point $\\mathcal{P} = (x_e, y_e)$ within $I$ to serve as the lower-left corner of $I_e$ . If the selected region $I_e$ are completely inside $I$ (i.e. $x_e + W_e \\leq W$ and $y_e + H_e \\leq H$ ), we define it as the selected rectangular region. Otherwise, we repeat the selection process until a valid $I_e$ is found. The whole procedure for selecting the rectangular region and applying the patch to original image is illustrated in Alg. 2.", + "bbox": [ + 501, + 710, + 921, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "IV. EXPERIMENT", + "text_level": 1, + "bbox": [ + 648, + 829, + 774, + 842 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. Experiment Settings", + "text_level": 1, + "bbox": [ + 503, + 849, + 666, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As mentioned in previous sections, we evaluated our random style replacement method for image classification using the well-known STL-10 dataset [3]. To ensure the effectiveness", + "bbox": [ + 503, + 869, + 921, + 916 + ], + "page_idx": 1 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: Random Patch by Subregion" + ], + "code_body": "Input: Input image $I$ Utility image $I^{\\prime}$ Patched area ratio range $s_l$ and $s_h$ Patched aspect ratio range $r_l$ and $r_h$ Output: Patched image $I^{*}$ \n1 $S_{e}\\gets \\mathrm{Rand}(s_{l},s_{h})\\times S;$ \n2 $r_e\\gets \\mathrm{Rand}(r_l,r_h)$ \n3 $H_{e}\\leftarrow \\sqrt{S_{e}\\times r_{e}}$ $W_{e}\\leftarrow \\sqrt{\\frac{S_{e}}{r_{e}}};$ \n4 while True do \n5 $x_{e}\\gets \\mathrm{Rand}(0,W),y_{e}\\gets \\mathrm{Rand}(0,H);$ if $x_{e} + W_{e}\\leq W$ and $y_{e} + H_{e}\\leq H$ then \n7 $I_{e}\\gets (x_{e},y_{e},x_{e} + W_{e},y_{e} + H_{e});$ \n8 $I(I_e)\\gets I'(I_e);$ \n9 $I^{*}\\gets I$ return $I^{*}$ \n10 end \n12 end", + "bbox": [ + 71, + 89, + 406, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and fairness of our evaluation, we set our experiment conditions mostly the same as [16, 17]. The benchmark networks we selected are ResNet18, ResNet50, ResNet101 and ResNet152 without pre-trained parameters [20].", + "bbox": [ + 73, + 393, + 491, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In all experiments, instead of introducing more advanced optimizers or training procedures such as [21], we selected the Adam optimizer (momentum $\\beta_{1} = 0.5$ , $\\beta_{2} = 0.999$ , initial learning rate of 0.001) to align with the settings of other data augmentation methods [16, 17, 22]. 
For our setting of the style augmentation parameter, we selected the style interpolation parameter $\\alpha$ as 0.5 and augmentation ratio as 1:1 [16]. All experiments are trained on RTX 4090 with 100 epochs.", + "bbox": [ + 73, + 455, + 490, + 575 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Original dataset without any advanced data augmentation techniques.", + "2) Dataset with naive data augmentation by simply copying and stacking the original dataset to match the same augmentation ratio as other groups, along with some routine augmentation operations.", + "3) Dataset with random style replacement by subregion.", + "4) Dataset with random style replacement at the pixel level (with an independent probability $p = 0.5$ )." + ], + "bbox": [ + 84, + 579, + 490, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Classification Evaluation", + "text_level": 1, + "bbox": [ + 73, + 728, + 269, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We evaluated our proposed data augmentation technique on the STL-10 dataset, with only 5,000 training images and 8,000 test color images. After the augmentation process, as mentioned in previous sections, the size of the augmented training set will double to 10,000 and the corresponding augmentation treatment will be randomly applied accordingly. We applied the same training settings as prior work, whose effectiveness in strategy and hyperparameter selection, including learning rate, has already been verified.", + "bbox": [ + 73, + 750, + 490, + 883 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our method achieved $81.6\\%$ classification accuracy in just 100 training epochs, as shown in Fig. 2. This result is", + "bbox": [ + 75, + 886, + 491, + 917 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7e026a64b50dfe4054d7a664322f122990eca0662d52b4f950f3efd2e0769e15.jpg", + "image_caption": [ + "Fig. 2: Classification Accuracy of ResNets on STL-10 test set. \"None\" represents original dataset. \"Naive\" represents dataset with naive data augmentation by simply stacking the original dataset. \"Pixel\" represents dataset with random style replacement at the pixel level. \"Subregion\" represents dataset with random style replacement by subregion." + ], + "image_footnote": [], + "bbox": [ + 526, + 66, + 898, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "both faster and more accurate than the $80.8\\%$ accuracy after 100,000 epochs reported by Jackson et al. [16], highlighting our approach's efficiency and scalability.", + "bbox": [ + 501, + 334, + 919, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We also tested our data augmentation technique across various network architectures including ResNet18, ResNet50, ResNet101, and ResNet152, where it consistently outperformed others, demonstrating its robustness and versatility for a wide range of computer vision tasks.", + "bbox": [ + 501, + 378, + 919, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Furthermore, our findings support those of Zhong et al. [17], who found that erasing entire subregions is more effective than pixel-level erasing. Similarly, our data shows that random style replacement within subregions is a superior augmentation strategy, enhancing the training data's representational richness and contributing to faster model convergence and improved performance. 
This strategy maintains structural integrity and introduces variations that reflect the natural diversity of real-world datasets.", + "bbox": [ + 501, + 454, + 921, + 589 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/54b898d8583a61f6a009a134ef5de79591a265e8ff8c71a6dae49cdf78b845e9.jpg", + "image_caption": [ + "Fig. 3: Loss of ResNets on STL-10 test set. \"None\" represents original dataset. \"Naive\" represents dataset with naive data augmentation by simply stacking the original dataset. \"Pixel\" represents dataset with random style replacement at the pixel level. \"Subregion\" represents dataset with random style replacement by subregion." + ], + "image_footnote": [], + "bbox": [ + 506, + 599, + 710, + 756 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fe8a4d9da32436ee4f701ac6a4674312ac9234f40e25d7c3bd7ea6a90ef3413b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 714, + 599, + 916, + 756 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To confirm the effectiveness of our data augmentation technique, we analyzed the test loss of each method on the STL-10 test set, as shown in Fig. 3. In contrast to the naive", + "bbox": [ + 503, + 869, + 921, + 917 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "dataset, whose test loss stops converging after the 20th epoch, all augmented datasets show improved convergence speed and reduced loss variability. Notably, the style augmentation strategy that randomly replaces subregions achieves the fastest convergence and the most stable training process. Despite varying effectiveness in stabilizing training loss across ResNets, our method's performance remains consistently stable.", + "bbox": [ + 73, + 71, + 491, + 176 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "V. CONCLUSIONS", + "text_level": 1, + "bbox": [ + 217, + 190, + 349, + 204 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In conclusion, our random style replacement strategy offers a practical and scalable data augmentation solution for the STL-10 dataset and beyond. By innovatively combining [16] and [17], our proposed data augmentation framework demonstrates superior performance and achieves faster convergence. By randomly replacing subregions rather than individual pixels, we preserve critical structural information while introducing meaningful variability, resulting in faster training convergence and higher accuracy. Our experiments with multiple ResNet architectures consistently verify the robustness of this method, showcasing its versatility for diverse computer vision applications.", + "bbox": [ + 73, + 212, + 491, + 393 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VI. FUTURE WORK", + "text_level": 1, + "bbox": [ + 210, + 406, + 354, + 420 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our random style replacement method has shown promising results, yet further validation is needed to confirm its wider applicability. It is crucial to test this technique across various datasets and tasks to establish its generalizability and identify any limitations. Additionally, the convergence speed observations require confirmation through further experiments involving diverse datasets and network architectures. Moreover, integrating Large Language Models (LLMs) guided approaches [23] could enhance the method. 
These approaches would use LLMs to guide style replacement, potentially selecting optimal subregions for style transfer based on foreground and background information, thus enabling more meaningful and effective transformations.", + "bbox": [ + 73, + 429, + 491, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 235, + 636, + 331, + 648 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] H.-C. Dan, Z. Huang, B. Lu, and M. Li, \"Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework,\" Construction and Building Materials, 2024.", + "[2] P. Li et al., \"Contextual hourglass network for semantic segmentation of high resolution aerial imagery,\" in ICECAI. IEEE, 2024, pp. 15-18.", + "[3] A. Coates, A. Ng, and H. Lee, \"An analysis of single-layer networks in unsupervised feature learning,\" in PMLR, 2011, pp. 215-223.", + "[4] T. Hastie et al., \"Unsupervised learning,\" The elements of statistical learning: Data mining, inference, and prediction, pp. 485-585, 2009.", + "[5] Q. Yang et al., \"A comparative study on enhancing prediction in social network advertisement through data augmentation,\" in MLISE, 2024.", + "[6] K. Ding et al., \"Data augmentation for deep graph learning: A survey,\" ACM SIGKDD Explorations Newsletter, vol. 24, no. 2, pp. 61-77, 2022.", + "[7] T. Kumar et al., \"Image data augmentation approaches: A comprehensive survey and future directions,\" IEEE Access, 2024.", + "[8] M. Bayer et al., \"Data augmentation in natural language processing: a novel text generation approach for long and short text classifiers,\" *IJMLC*, vol. 14, no. 1, p. 135-150, Apr. 2022.", + "[9] A. Sankar et al., \"Self-supervised role learning for graph neural networks,\" Knowledge and Information Systems, 2022.", + "[10] K. Narang et al., “Ranking user-generated content via multi-relational graph convolution,” in SIGIR, 2021, pp. 470–480." + ], + "bbox": [ + 76, + 654, + 488, + 915 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[11] Y. Jin et al., \"Representation and extraction of diesel engine maintenance knowledge graph with bidirectional relations based on bert and the bilstm-crf model,\" in ICEBE. IEEE, 2021, pp. 126-133.", + "[12] S. Li, H. Xu, and H. Chen, “Focused react: Improving react through reiterate and early stop,” in WiNLP Workshop, 2024.", + "[13] D. P. Kingma and M. Welling, \"Auto-encoding variational bayes,\" arXiv preprint arXiv:1312.6114, 2013.", + "[14] I. J. Goodfellow et al., \"Generative adversarial networks,\" arXiv, 2014.", + "[15] J. Ho et al., “Denoising diffusion probabilistic models,” Advances in neural information processing systems, vol. 33, pp. 6840–6851, 2020.", + "[16] P. T. Jackson et al., \"Style augmentation: data augmentation via style randomization.\" in CVPR workshops, vol. 6, 2019, pp. 10-11.", + "[17] Z. Zhong et al., “Random erasing data augmentation,” in AAAI, vol. 34, no. 07, 2020, pp. 13001-13008.", + "[18] X. Xu et al., \"Style transfer: From stitching to neural networks,\" in ICBASE. IEEE, 2024, pp. 526-530.", + "[19] Z. Ding, P. Li, Q. Yang, S. Li, and Q. Gong, “Regional style and color transfer,” in CVIDL. IEEE, 2024, pp. 593-597.", + "[20] K. He et al., “Deep residual learning for image recognition,” in CVPR, 2016, pp. 770–778.", + "[21] Q. 
Xu et al., \"A stochastic gda method with backtracking for solving nonconvex (strongly) concave minimax problems,\" arXiv, 2024.", + "[22] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” arXiv preprint arXiv:1412.6980, 2014.", + "[23] Z. Ding, P. Li, Q. Yang, and S. Li, \"Enhance image-to-image generation with llava-generated prompts,\" in ISPDS. IEEE, 2024, pp. 77-81." + ], + "bbox": [ + 506, + 73, + 919, + 369 + ], + "page_idx": 3 + } +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_model.json b/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4a832087e001add51b47c1891bcad7c9649b5125 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_model.json @@ -0,0 +1,1187 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.28, + 0.058, + 0.718 + ], + "angle": 270, + "content": "arXiv:2504.10563v2 [cs.CV] 18 Jun 2025" + }, + { + "type": "title", + "bbox": [ + 0.148, + 0.073, + 0.853, + 0.141 + ], + "angle": 0, + "content": "Data Augmentation Through Random Style Replacement" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.162, + 0.317, + 0.177 + ], + "angle": 0, + "content": "\\(1^{\\mathrm{st}}\\) Qikai Yang\\*" + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.177, + 0.44, + 0.189 + ], + "angle": 0, + "content": "University of Illinois Urbana-Champaign, Urbana, USA" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.189, + 0.406, + 0.203 + ], + "angle": 0, + "content": "*Corresponding Author: qikaiy2@illinois.edu" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.215, + 0.323, + 0.229 + ], + "angle": 0, + "content": "\\(3^{\\text{th}}\\) Huaiying Luo" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.229, + 0.367, + 0.242 + ], + "angle": 0, + "content": "Cornell University, Ithaca, USA" + }, + { + "type": "text", + "bbox": [ + 0.684, + 0.162, + 0.764, + 0.176 + ], + "angle": 0, + "content": "\\(2^{\\mathrm{nd}}\\) Cheng Ji" + }, + { + "type": "text", + "bbox": [ + 0.554, + 0.177, + 0.894, + 0.189 + ], + "angle": 0, + "content": "University of Illinois Urbana-Champaign, Urbana, USA" + }, + { + "type": "text", + "bbox": [ + 0.678, + 0.215, + 0.769, + 0.229 + ], + "angle": 0, + "content": "\\(4^{\\mathrm{th}}\\) Panfeng Li" + }, + { + "type": "text", + "bbox": [ + 0.598, + 0.229, + 0.85, + 0.242 + ], + "angle": 0, + "content": "University of Michigan, Ann Arbor, USA" + }, + { + "type": "text", + "bbox": [ + 0.439, + 0.254, + 0.554, + 0.268 + ], + "angle": 0, + "content": "\\(5^{\\mathrm{th}}\\) Zhicheng Ding" + }, + { + "type": "text", + "bbox": [ + 0.38, + 0.268, + 0.614, + 0.28 + ], + "angle": 0, + "content": "Columbia University, New York, USA" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.314, + 0.492, + 0.49 + ], + "angle": 0, + "content": "Abstract-In this paper, we introduce a novel data augmentation technique that combines the advantages of style augmentation and random erasing by selectively replacing image subregions with style-transferred patches. Our approach first applies a random style transfer to training images, then randomly substitutes selected areas of these images with patches derived from the style-transferred versions. This method is able to seamlessly accommodate a wide range of existing style transfer algorithms and can be readily integrated into diverse data augmentation pipelines. 
By incorporating our strategy, the training process becomes more robust and less prone to overfitting. Comparative experiments demonstrate that, relative to previous style augmentation methods, our technique achieves superior performance and faster convergence." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.491, + 0.491, + 0.517 + ], + "angle": 0, + "content": "Index Terms—Data Augmentation, Style Transfer, Style Augmentation" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.533, + 0.35, + 0.546 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.554, + 0.491, + 0.719 + ], + "angle": 0, + "content": "Recent advancements in deep learning have driven significant progress in a wide range of computer vision tasks, including image classification, object detection, and semantic segmentation [1, 2]. Despite these advances, many of these tasks continue to face a fundamental bottleneck: a lack of sufficient labeled data [3, 4]. Annotating large-scale datasets is both time-consuming and costly, which can limit the applicability of deep neural networks in specialized or rapidly evolving domains. To mitigate this issue, data augmentation techniques are heavily utilized, artificially expanding and diversifying the training set so that models generalize more effectively." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.721, + 0.492, + 0.918 + ], + "angle": 0, + "content": "Data augmentation [5] has garnered significant attention in supervised learning research across a wide range of domains—including computer vision [6, 7], natural language processing [8], graph learning [9, 10], and large language model [11, 12] — due to its ability to increase both the volume and diversity of training data, thereby enhancing model generalization and mitigating overfitting. Broadly, data augmentation strategies can be grouped into two categories: generative methods, which utilize models like Variational Autoencoders (VAEs) [13], Generative Adversarial Networks (GANs) [14], Large Language Models (LLMs), or diffusion-based frameworks [15] to synthesize new data; and traditional methods, which rely on transformations such as random cropping, flip" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.308, + 0.589, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.539, + 0.461, + 0.558, + 0.473 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.309, + 0.671, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.622, + 0.461, + 0.641, + 0.473 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.674, + 0.309, + 0.755, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.706, + 0.461, + 0.724, + 0.473 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.758, + 0.309, + 0.838, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.789, + 0.461, + 0.807, + 0.473 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.842, + 0.309, + 0.921, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.871, + 0.461, + 0.89, + 0.473 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.48, + 0.922, + 0.556 + ], + "angle": 0, + "content": "Fig. 
1: Examples of random style transfer: we generate a style-transferred image and use it to patch original image in different ways (a) Input Image. (b) Random-Region-Erased Image. (c) Style Transfer. (d) Random Style Replacement. (e) Random Style Replacement." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.596, + 0.922, + 0.838 + ], + "angle": 0, + "content": "ping, rotations, color jittering, and histogram equalization to modify existing samples. While both approaches aim to expose the model to a wider variety of conditions, thus reducing overfitting, traditional augmentation strategies may not fully capture the complexity of real-world variability. Consequently, several studies have explored more refined methods, including style augmentation and random erasing [16-18]. Style augmentation employs style transfer [19] to alter the visual attributes of training images while preserving their semantic content, thereby increasing robustness to differences in texture, color, and contrast. Random erasing, on the other hand, randomly occludes or replaces subregions of an image, making models more resilient to missing or corrupted information. In this paper, we revisit these traditional approaches—particularly focusing on their potential to advance the efficacy of data augmentation in supervised learning." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.841, + 0.922, + 0.918 + ], + "angle": 0, + "content": "In this paper, we introduce a novel data augmentation method that merges style augmentation with random erasing. Our approach involves applying a random style transfer to an image, followed by replacing specific subregions with patches from the style-transferred version. This technique enhances" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.074, + 0.071, + 0.491, + 0.132 + ], + "angle": 0, + "content": "robustness against style variations and occlusion-like effects. It integrates smoothly with existing style transfer frameworks and fits easily into standard data augmentation pipelines, as illustrated in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.133, + 0.433, + 0.147 + ], + "angle": 0, + "content": "The main contributions of our work are as below:" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.151, + 0.492, + 0.196 + ], + "angle": 0, + "content": "1) We propose a technique that merges style augmentation and random erasing, offering benefits from both texture variation and structured occlusion." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.196, + 0.492, + 0.302 + ], + "angle": 0, + "content": "2) We demonstrate through experiments that our approach reduces the risk of overfitting while achieving faster convergence compared to established style augmentation methods. Ease of Integration: Our strategy is parameter-free and can be readily adapted to a broad spectrum of computer vision tasks, making it a highly practical solution for data augmentation." + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.151, + 0.492, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.305, + 0.492, + 0.366 + ], + "angle": 0, + "content": "By leveraging this new augmentation method, we observe notable gains in model performance across different tasks, highlighting its potential to address the persistent challenge of limited labeled data in computer vision research." + }, + { + "type": "title", + "bbox": [ + 0.236, + 0.377, + 0.331, + 0.391 + ], + "angle": 0, + "content": "II. 
DATASET" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.398, + 0.491, + 0.534 + ], + "angle": 0, + "content": "We tested our random style replacement method on the STL-10 dataset, which includes 5,000 training images and 8,000 test images, each with a resolution of \\(96 \\times 96\\) pixels across 10 classes [3]. We chose STL-10 due to its complex backgrounds and high resolution, which pose a substantial challenge for image classification, making it a robust benchmark. Additionally, the limited size of the training set highlights the effectiveness of our data augmentation technique in enhancing training data." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.546, + 0.337, + 0.56 + ], + "angle": 0, + "content": "III. METHODS" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.567, + 0.491, + 0.614 + ], + "angle": 0, + "content": "This sections introduces our random style replacement method in details. We described the overall process of random style replacement and explain how we perform image patch." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.625, + 0.286, + 0.64 + ], + "angle": 0, + "content": "A. Random Style Replacement" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.645, + 0.491, + 0.705 + ], + "angle": 0, + "content": "During training, random style replacement is applied with a certain probability \\( p \\): for each image \\( I \\) in a mini-batch, there's a probability \\( p \\) that it undergoes style replacement and a probability \\( 1 - p \\) that it remains unchanged." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.706, + 0.492, + 0.781 + ], + "angle": 0, + "content": "If selected, the image will be transformed into a new version with a partial style change. This random style replacement process consists of two steps: generating a complete-style-transferred image and merging it with the original image by certain patching methods. The procedure is shown in Alg. 1." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.781, + 0.492, + 0.918 + ], + "angle": 0, + "content": "Style transfer refers to a class of image processing algorithms that alter the visual style of an image while preserving its semantic content. For style transfer to be part of a data augmentation technique, it needs to be a both fast and random algorithm capable of applying a broad range of styles. Therefore, we adopt the approach of Jackson et al., which efficiently generates a completely style-transferred image by incorporating randomness on the fly without requiring heavy computations [16]." + }, + { + "type": "code_caption", + "bbox": [ + 0.515, + 0.073, + 0.874, + 0.087 + ], + "angle": 0, + "content": "Algorithm 1: Random Style Replacement Procedure" + }, + { + "type": "algorithm", + "bbox": [ + 0.508, + 0.089, + 0.793, + 0.289 + ], + "angle": 0, + "content": "Input: Input image \\(I\\) Augmentation probability \\(p\\) Patch mode pMode; Output: Augmented image \\(I^{*}\\) Initialization: \\(p_1\\gets \\mathrm{Rand}(0,1)\\) 1 if \\(p_1\\geq p\\) then 2 \\(I^{*}\\gets I\\) 3 return \\(I^{*}\\) 4 else 5 \\(I^{\\prime}\\gets\\) randomStyleTransfer(I); 6 \\(I^{*}\\gets\\) randomPatch(I,I',pMode); 7 return \\(I^{*}\\) 8 end" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.327, + 0.922, + 0.448 + ], + "angle": 0, + "content": "The generated style-transferred image will then be used to patch the original image, creating an augmented image. 
There are multiple patching methods, and we adopt the two most common ones: patching by a random subregion and patching randomly selecting individual pixels. To avoid bias in data augmentation, we employed random style transferring to ensure diverse and uniform modifications across all image types, enhancing model generalization." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.46, + 0.63, + 0.473 + ], + "angle": 0, + "content": "B. Random Patch" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.481, + 0.921, + 0.586 + ], + "angle": 0, + "content": "Random patch is to patch a image based on another image. Here, we provided a detailed explanation of random patch by subregion. This method copies a randomly selected region from the style-transferred image onto the original image. Specifically, it randomly selects a rectangle region \\( I_{e} \\) within the image and overwrite all its pixels with those from the style-transferred image." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.587, + 0.92, + 0.713 + ], + "angle": 0, + "content": "Firstly we will determine the shape of the patching area \\( I_{e} \\). Assume the training image has dimensions \\( W \\times H \\) and an area \\( S = W \\times H \\). We randomly initialize the area of the patched rectangle region to \\( S_{e} \\), where \\( \\frac{S_{e}}{S} \\) falls within the range defined by the minimum \\( s_{l} \\) and maximum \\( s_{h} \\). Similarly, the aspect ratio of the rectangle region, denoted as \\( r_{e} \\), is randomly chosen between \\( r_{l} \\) and \\( r_{h} \\). Given those, the dimensions of \\( I_{e} \\) are computed as \\( H_{e} = \\sqrt{S_{e} \\times r_{e}} \\) and \\( W_{e} = \\sqrt{\\frac{S_{e}}{r_{e}}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.712, + 0.922, + 0.818 + ], + "angle": 0, + "content": "Next, we randomly select a point \\(\\mathcal{P} = (x_e, y_e)\\) within \\(I\\) to serve as the lower-left corner of \\(I_e\\). If the selected region \\(I_e\\) are completely inside \\(I\\) (i.e. \\(x_e + W_e \\leq W\\) and \\(y_e + H_e \\leq H\\)), we define it as the selected rectangular region. Otherwise, we repeat the selection process until a valid \\(I_e\\) is found. The whole procedure for selecting the rectangular region and applying the patch to original image is illustrated in Alg. 2." + }, + { + "type": "title", + "bbox": [ + 0.649, + 0.83, + 0.776, + 0.843 + ], + "angle": 0, + "content": "IV. EXPERIMENT" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.851, + 0.667, + 0.866 + ], + "angle": 0, + "content": "A. Experiment Settings" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.871, + 0.922, + 0.917 + ], + "angle": 0, + "content": "As mentioned in previous sections, we evaluated our random style replacement method for image classification using the well-known STL-10 dataset [3]. 
To ensure the effectiveness" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.084, + 0.072, + 0.378, + 0.088 + ], + "angle": 0, + "content": "Algorithm 2: Random Patch by Subregion" + }, + { + "type": "algorithm", + "bbox": [ + 0.073, + 0.09, + 0.408, + 0.357 + ], + "angle": 0, + "content": "Input: Input image \\(I\\) Utility image \\(I^{\\prime}\\) Patched area ratio range \\(s_l\\) and \\(s_h\\) Patched aspect ratio range \\(r_l\\) and \\(r_h\\) Output: Patched image \\(I^{*}\\) \n1 \\(S_{e}\\gets \\mathrm{Rand}(s_{l},s_{h})\\times S;\\) \n2 \\(r_e\\gets \\mathrm{Rand}(r_l,r_h)\\) \n3 \\(H_{e}\\leftarrow \\sqrt{S_{e}\\times r_{e}}\\) \\(W_{e}\\leftarrow \\sqrt{\\frac{S_{e}}{r_{e}}};\\) \n4 while True do \n5 \\(x_{e}\\gets \\mathrm{Rand}(0,W),y_{e}\\gets \\mathrm{Rand}(0,H);\\) if \\(x_{e} + W_{e}\\leq W\\) and \\(y_{e} + H_{e}\\leq H\\) then \n7 \\(I_{e}\\gets (x_{e},y_{e},x_{e} + W_{e},y_{e} + H_{e});\\) \n8 \\(I(I_e)\\gets I'(I_e);\\) \n9 \\(I^{*}\\gets I\\) return \\(I^{*}\\) \n10 end \n12 end" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.394, + 0.492, + 0.456 + ], + "angle": 0, + "content": "and fairness of our evaluation, we set our experiment conditions mostly the same as [16, 17]. The benchmark networks we selected are ResNet18, ResNet50, ResNet101 and ResNet152 without pre-trained parameters [20]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.456, + 0.491, + 0.577 + ], + "angle": 0, + "content": "In all experiments, instead of introducing more advanced optimizers or training procedures such as [21], we selected the Adam optimizer (momentum \\(\\beta_{1} = 0.5\\), \\(\\beta_{2} = 0.999\\), initial learning rate of 0.001) to align with the settings of other data augmentation methods [16, 17, 22]. For our setting of the style augmentation parameter, we selected the style interpolation parameter \\(\\alpha\\) as 0.5 and augmentation ratio as 1:1 [16]. All experiments are trained on RTX 4090 with 100 epochs." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.58, + 0.49, + 0.61 + ], + "angle": 0, + "content": "1) Original dataset without any advanced data augmentation techniques." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.611, + 0.491, + 0.67 + ], + "angle": 0, + "content": "2) Dataset with naive data augmentation by simply copying and stacking the original dataset to match the same augmentation ratio as other groups, along with some routine augmentation operations." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.671, + 0.47, + 0.685 + ], + "angle": 0, + "content": "3) Dataset with random style replacement by subregion." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.686, + 0.49, + 0.716 + ], + "angle": 0, + "content": "4) Dataset with random style replacement at the pixel level (with an independent probability \\( p = 0.5 \\))." + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.58, + 0.491, + 0.716 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.729, + 0.27, + 0.744 + ], + "angle": 0, + "content": "B. Classification Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.75, + 0.491, + 0.885 + ], + "angle": 0, + "content": "We evaluated our proposed data augmentation technique on the STL-10 dataset, with only 5,000 training images and 8,000 test color images. After the augmentation process, as mentioned in previous sections, the size of the augmented training set will double to 10,000 and the corresponding augmentation treatment will be randomly applied accordingly. 
We applied the same training settings as prior work, whose effectiveness in strategy and hyperparameter selection, including learning rate, has already been verified." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.887, + 0.492, + 0.918 + ], + "angle": 0, + "content": "Our method achieved \\(81.6\\%\\) classification accuracy in just 100 training epochs, as shown in Fig. 2. This result is" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.067, + 0.9, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.217, + 0.922, + 0.309 + ], + "angle": 0, + "content": "Fig. 2: Classification Accuracy of ResNets on STL-10 test set. \"None\" represents original dataset. \"Naive\" represents dataset with naive data augmentation by simply stacking the original dataset. \"Pixel\" represents dataset with random style replacement at the pixel level. \"Subregion\" represents dataset with random style replacement by subregion." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.335, + 0.921, + 0.38 + ], + "angle": 0, + "content": "both faster and more accurate than the \\(80.8\\%\\) accuracy after 100,000 epochs reported by Jackson et al. [16], highlighting our approach's efficiency and scalability." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.38, + 0.92, + 0.456 + ], + "angle": 0, + "content": "We also tested our data augmentation technique across various network architectures including ResNet18, ResNet50, ResNet101, and ResNet152, where it consistently outperformed others, demonstrating its robustness and versatility for a wide range of computer vision tasks." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.455, + 0.922, + 0.59 + ], + "angle": 0, + "content": "Furthermore, our findings support those of Zhong et al. [17], who found that erasing entire subregions is more effective than pixel-level erasing. Similarly, our data shows that random style replacement within subregions is a superior augmentation strategy, enhancing the training data's representational richness and contributing to faster model convergence and improved performance. This strategy maintains structural integrity and introduces variations that reflect the natural diversity of real-world datasets." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.6, + 0.712, + 0.757 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.715, + 0.6, + 0.918, + 0.757 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.763, + 0.922, + 0.854 + ], + "angle": 0, + "content": "Fig. 3: Loss of ResNets on STL-10 test set. \"None\" represents original dataset. \"Naive\" represents dataset with naive data augmentation by simply stacking the original dataset. \"Pixel\" represents dataset with random style replacement at the pixel level. \"Subregion\" represents dataset with random style replacement by subregion." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.871, + 0.922, + 0.918 + ], + "angle": 0, + "content": "To confirm the effectiveness of our data augmentation technique, we analyzed the test loss of each method on the STL-10 test set, as shown in Fig. 3. In contrast to the naive" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.074, + 0.072, + 0.493, + 0.178 + ], + "angle": 0, + "content": "dataset, whose test loss stops converging after the 20th epoch, all augmented datasets show improved convergence speed and reduced loss variability. 
Notably, the style augmentation strategy that randomly replaces subregions achieves the fastest convergence and the most stable training process. Despite varying effectiveness in stabilizing training loss across ResNets, our method's performance remains consistently stable." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.191, + 0.35, + 0.205 + ], + "angle": 0, + "content": "V. CONCLUSIONS" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.213, + 0.493, + 0.395 + ], + "angle": 0, + "content": "In conclusion, our random style replacement strategy offers a practical and scalable data augmentation solution for the STL-10 dataset and beyond. By innovatively combining [16] and [17], our proposed data augmentation framework demonstrates superior performance and achieves faster convergence. By randomly replacing subregions rather than individual pixels, we preserve critical structural information while introducing meaningful variability, resulting in faster training convergence and higher accuracy. Our experiments with multiple ResNet architectures consistently verify the robustness of this method, showcasing its versatility for diverse computer vision applications." + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.407, + 0.356, + 0.421 + ], + "angle": 0, + "content": "VI. FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.43, + 0.492, + 0.626 + ], + "angle": 0, + "content": "Our random style replacement method has shown promising results, yet further validation is needed to confirm its wider applicability. It is crucial to test this technique across various datasets and tasks to establish its generalizability and identify any limitations. Additionally, the convergence speed observations require confirmation through further experiments involving diverse datasets and network architectures. Moreover, integrating Large Language Models (LLMs) guided approaches [23] could enhance the method. These approaches would use LLMs to guide style replacement, potentially selecting optimal subregions for style transfer based on foreground and background information, thus enabling more meaningful and effective transformations." + }, + { + "type": "title", + "bbox": [ + 0.236, + 0.637, + 0.333, + 0.65 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.655, + 0.49, + 0.7 + ], + "angle": 0, + "content": "[1] H.-C. Dan, Z. Huang, B. Lu, and M. Li, \"Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework,\" Construction and Building Materials, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.7, + 0.49, + 0.723 + ], + "angle": 0, + "content": "[2] P. Li et al., \"Contextual hourglass network for semantic segmentation of high resolution aerial imagery,\" in ICECAI. IEEE, 2024, pp. 15-18." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.723, + 0.49, + 0.746 + ], + "angle": 0, + "content": "[3] A. Coates, A. Ng, and H. Lee, \"An analysis of single-layer networks in unsupervised feature learning,\" in PMLR, 2011, pp. 215-223." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.746, + 0.49, + 0.768 + ], + "angle": 0, + "content": "[4] T. Hastie et al., \"Unsupervised learning,\" The elements of statistical learning: Data mining, inference, and prediction, pp. 485-585, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.768, + 0.49, + 0.791 + ], + "angle": 0, + "content": "[5] Q. 
Yang et al., \"A comparative study on enhancing prediction in social network advertisement through data augmentation,\" in MLISE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.791, + 0.49, + 0.814 + ], + "angle": 0, + "content": "[6] K. Ding et al., \"Data augmentation for deep graph learning: A survey,\" ACM SIGKDD Explorations Newsletter, vol. 24, no. 2, pp. 61-77, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.814, + 0.49, + 0.836 + ], + "angle": 0, + "content": "[7] T. Kumar et al., \"Image data augmentation approaches: A comprehensive survey and future directions,\" IEEE Access, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.836, + 0.49, + 0.87 + ], + "angle": 0, + "content": "[8] M. Bayer et al., \"Data augmentation in natural language processing: a novel text generation approach for long and short text classifiers,\" *IJMLC*, vol. 14, no. 1, p. 135-150, Apr. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.871, + 0.49, + 0.893 + ], + "angle": 0, + "content": "[9] A. Sankar et al., \"Self-supervised role learning for graph neural networks,\" Knowledge and Information Systems, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.894, + 0.49, + 0.916 + ], + "angle": 0, + "content": "[10] K. Narang et al., “Ranking user-generated content via multi-relational graph convolution,” in SIGIR, 2021, pp. 470–480." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.655, + 0.49, + 0.916 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.074, + 0.92, + 0.108 + ], + "angle": 0, + "content": "[11] Y. Jin et al., \"Representation and extraction of diesel engine maintenance knowledge graph with bidirectional relations based on bert and the bilstm-crf model,\" in ICEBE. IEEE, 2021, pp. 126-133." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.108, + 0.92, + 0.131 + ], + "angle": 0, + "content": "[12] S. Li, H. Xu, and H. Chen, “Focused react: Improving react through reiterate and early stop,” in WiNLP Workshop, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.131, + 0.92, + 0.153 + ], + "angle": 0, + "content": "[13] D. P. Kingma and M. Welling, \"Auto-encoding variational bayes,\" arXiv preprint arXiv:1312.6114, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.154, + 0.92, + 0.165 + ], + "angle": 0, + "content": "[14] I. J. Goodfellow et al., \"Generative adversarial networks,\" arXiv, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.165, + 0.92, + 0.187 + ], + "angle": 0, + "content": "[15] J. Ho et al., “Denoising diffusion probabilistic models,” Advances in neural information processing systems, vol. 33, pp. 6840–6851, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.187, + 0.92, + 0.21 + ], + "angle": 0, + "content": "[16] P. T. Jackson et al., \"Style augmentation: data augmentation via style randomization.\" in CVPR workshops, vol. 6, 2019, pp. 10-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.21, + 0.92, + 0.233 + ], + "angle": 0, + "content": "[17] Z. Zhong et al., “Random erasing data augmentation,” in AAAI, vol. 34, no. 07, 2020, pp. 13001-13008." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.233, + 0.92, + 0.255 + ], + "angle": 0, + "content": "[18] X. Xu et al., \"Style transfer: From stitching to neural networks,\" in ICBASE. IEEE, 2024, pp. 526-530." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.255, + 0.92, + 0.278 + ], + "angle": 0, + "content": "[19] Z. Ding, P. Li, Q. 
Yang, S. Li, and Q. Gong, “Regional style and color transfer,” in CVIDL. IEEE, 2024, pp. 593-597." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.278, + 0.92, + 0.3 + ], + "angle": 0, + "content": "[20] K. He et al., “Deep residual learning for image recognition,” in CVPR, 2016, pp. 770–778." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.3, + 0.92, + 0.323 + ], + "angle": 0, + "content": "[21] Q. Xu et al., \"A stochastic gda method with backtracking for solving nonconvex (strongly) concave minimax problems,\" arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.323, + 0.92, + 0.346 + ], + "angle": 0, + "content": "[22] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” arXiv preprint arXiv:1412.6980, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.92, + 0.37 + ], + "angle": 0, + "content": "[23] Z. Ding, P. Li, Q. Yang, and S. Li, \"Enhance image-to-image generation with llava-generated prompts,\" in ISPDS. IEEE, 2024, pp. 77-81." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.074, + 0.92, + 0.37 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_origin.pdf b/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c70ebe2d311edab343526a85d34cd9f8d1283a6d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/79555225-b2fc-4e07-9ca0-9f1bf7db778b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da5ce8a646558a0a57e3c03f83087a781e656ebadcbb8b297dd029de9d62e19f +size 34941210 diff --git a/data/2025/2504_10xxx/2504.10563/full.md b/data/2025/2504_10xxx/2504.10563/full.md new file mode 100644 index 0000000000000000000000000000000000000000..553558b50bcdb913b8e41541bf89913c919f21c5 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/full.md @@ -0,0 +1,178 @@ +# Data Augmentation Through Random Style Replacement + +$1^{\mathrm{st}}$ Qikai Yang\* + +University of Illinois Urbana-Champaign, Urbana, USA + +*Corresponding Author: qikaiy2@illinois.edu + +$3^{\text{th}}$ Huaiying Luo + +Cornell University, Ithaca, USA + +$2^{\mathrm{nd}}$ Cheng Ji + +University of Illinois Urbana-Champaign, Urbana, USA + +$4^{\mathrm{th}}$ Panfeng Li + +University of Michigan, Ann Arbor, USA + +$5^{\mathrm{th}}$ Zhicheng Ding + +Columbia University, New York, USA + +Abstract-In this paper, we introduce a novel data augmentation technique that combines the advantages of style augmentation and random erasing by selectively replacing image subregions with style-transferred patches. Our approach first applies a random style transfer to training images, then randomly substitutes selected areas of these images with patches derived from the style-transferred versions. This method is able to seamlessly accommodate a wide range of existing style transfer algorithms and can be readily integrated into diverse data augmentation pipelines. By incorporating our strategy, the training process becomes more robust and less prone to overfitting. Comparative experiments demonstrate that, relative to previous style augmentation methods, our technique achieves superior performance and faster convergence. + +Index Terms—Data Augmentation, Style Transfer, Style Augmentation + +# I. 
INTRODUCTION + +Recent advancements in deep learning have driven significant progress in a wide range of computer vision tasks, including image classification, object detection, and semantic segmentation [1, 2]. Despite these advances, many of these tasks continue to face a fundamental bottleneck: a lack of sufficient labeled data [3, 4]. Annotating large-scale datasets is both time-consuming and costly, which can limit the applicability of deep neural networks in specialized or rapidly evolving domains. To mitigate this issue, data augmentation techniques are heavily utilized, artificially expanding and diversifying the training set so that models generalize more effectively. + +Data augmentation [5] has garnered significant attention in supervised learning research across a wide range of domains—including computer vision [6, 7], natural language processing [8], graph learning [9, 10], and large language model [11, 12] — due to its ability to increase both the volume and diversity of training data, thereby enhancing model generalization and mitigating overfitting. Broadly, data augmentation strategies can be grouped into two categories: generative methods, which utilize models like Variational Autoencoders (VAEs) [13], Generative Adversarial Networks (GANs) [14], Large Language Models (LLMs), or diffusion-based frameworks [15] to synthesize new data; and traditional methods, which rely on transformations such as random cropping, flip + +![](images/5c95982de21f32060fcf87daeb2846c2993baf8fe7dc6d6dbed286d6a6f7d61c.jpg) +(a) + +![](images/c1f4fc762f4606a8c1f77f5930372624fb1fef7cea8ef144d64d24a0642d444f.jpg) +(b) + +![](images/395de3fb724960ffe2d001e45b4c0adb878d66ece621d43806a6d2e1bf672551.jpg) +(c) + +![](images/12af2ef443c3e140f0e94ec1a27d1d3fb16f508cdfa26628f5af80f4259f59f2.jpg) +(d) +Fig. 1: Examples of random style transfer: we generate a style-transferred image and use it to patch original image in different ways (a) Input Image. (b) Random-Region-Erased Image. (c) Style Transfer. (d) Random Style Replacement. (e) Random Style Replacement. + +![](images/9d8bb25c9129de4886dd493710bdc1ad34676fd2a94cf035d3c30cea079c87ff.jpg) +(e) + +ping, rotations, color jittering, and histogram equalization to modify existing samples. While both approaches aim to expose the model to a wider variety of conditions, thus reducing overfitting, traditional augmentation strategies may not fully capture the complexity of real-world variability. Consequently, several studies have explored more refined methods, including style augmentation and random erasing [16-18]. Style augmentation employs style transfer [19] to alter the visual attributes of training images while preserving their semantic content, thereby increasing robustness to differences in texture, color, and contrast. Random erasing, on the other hand, randomly occludes or replaces subregions of an image, making models more resilient to missing or corrupted information. In this paper, we revisit these traditional approaches—particularly focusing on their potential to advance the efficacy of data augmentation in supervised learning. + +In this paper, we introduce a novel data augmentation method that merges style augmentation with random erasing. Our approach involves applying a random style transfer to an image, followed by replacing specific subregions with patches from the style-transferred version. This technique enhances + +robustness against style variations and occlusion-like effects. 
It integrates smoothly with existing style transfer frameworks and fits easily into standard data augmentation pipelines, as illustrated in Figure 1.

The main contributions of our work are as follows:

1) We propose a technique that merges style augmentation and random erasing, offering benefits from both texture variation and structured occlusion.
2) We demonstrate through experiments that our approach reduces the risk of overfitting while achieving faster convergence compared to established style augmentation methods.
3) Ease of Integration: Our strategy is parameter-free and can be readily adapted to a broad spectrum of computer vision tasks, making it a highly practical solution for data augmentation.

By leveraging this new augmentation method, we observe notable gains in model performance across different tasks, highlighting its potential to address the persistent challenge of limited labeled data in computer vision research.

# II. DATASET

We tested our random style replacement method on the STL-10 dataset, which includes 5,000 training images and 8,000 test images, each with a resolution of $96 \times 96$ pixels across 10 classes [3]. We chose STL-10 due to its complex backgrounds and high resolution, which pose a substantial challenge for image classification, making it a robust benchmark. Additionally, the limited size of the training set highlights the effectiveness of our data augmentation technique in enhancing training data.

# III. METHODS

This section introduces our random style replacement method in detail. We describe the overall process of random style replacement and explain how we perform image patching.

# A. Random Style Replacement

During training, random style replacement is applied with a certain probability $p$: for each image $I$ in a mini-batch, there is a probability $p$ that it undergoes style replacement and a probability $1 - p$ that it remains unchanged.

If selected, the image is transformed into a new version with a partial style change. This random style replacement process consists of two steps: generating a completely style-transferred image and merging it with the original image by a chosen patching method. The procedure is shown in Alg. 1.

Style transfer refers to a class of image processing algorithms that alter the visual style of an image while preserving its semantic content. For style transfer to be part of a data augmentation technique, the algorithm needs to be both fast and random, capable of applying a broad range of styles. Therefore, we adopt the approach of Jackson et al., which efficiently generates a completely style-transferred image by incorporating randomness on the fly without requiring heavy computations [16].

Algorithm 1: Random Style Replacement Procedure
Input: Input image $I$; augmentation probability $p$; patch mode pMode
Output: Augmented image $I^{*}$
Initialization: $p_1 \gets \mathrm{Rand}(0,1)$
1 if $p_1 \geq p$ then
2   $I^{*} \gets I$;
3   return $I^{*}$;
4 else
5   $I^{\prime} \gets$ randomStyleTransfer($I$);
6   $I^{*} \gets$ randomPatch($I$, $I^{\prime}$, pMode);
7   return $I^{*}$;
8 end

The generated style-transferred image is then used to patch the original image, creating an augmented image. There are multiple patching methods, and we adopt the two most common ones: patching by a random subregion and patching by randomly selecting individual pixels, as sketched below.
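To make the procedure concrete, a minimal Python/NumPy sketch of Alg. 1 (and of the subregion patch it calls, detailed in Alg. 2 below) might look as follows. The `style_transfer` callable, the helper names, and the default area and aspect-ratio ranges are illustrative placeholders, not the exact implementation used in our experiments.

```python
import random
import numpy as np

def patch_by_subregion(image, styled, s_l=0.02, s_h=0.4, r_l=0.3, r_h=3.3):
    # Copy one randomly sized and placed rectangle from the styled image onto the
    # original (Alg. 2). The area-ratio and aspect-ratio ranges here are placeholder
    # values; s_l, s_h, r_l, r_h are hyperparameters in the method.
    H, W = image.shape[:2]                # assumes an H x W x C array
    S = H * W
    while True:
        S_e = random.uniform(s_l, s_h) * S            # patch area
        r_e = random.uniform(r_l, r_h)                 # patch aspect ratio
        H_e, W_e = int((S_e * r_e) ** 0.5), int((S_e / r_e) ** 0.5)
        x_e, y_e = random.randrange(W), random.randrange(H)
        if x_e + W_e <= W and y_e + H_e <= H:          # region must lie inside I
            out = image.copy()
            out[y_e:y_e + H_e, x_e:x_e + W_e] = styled[y_e:y_e + H_e, x_e:x_e + W_e]
            return out

def patch_by_pixels(image, styled, replace_prob=0.5):
    # Pixel mode: each pixel independently takes the styled value with
    # probability replace_prob (0.5 in our experiments).
    mask = np.random.rand(*image.shape[:2]) < replace_prob
    out = image.copy()
    out[mask] = styled[mask]
    return out

def random_style_replacement(image, style_transfer, p=0.5, p_mode="subregion"):
    # Alg. 1: with probability p, generate a fully style-transferred copy of the
    # image and patch it back onto the original in the chosen mode.
    if random.random() >= p:
        return image                                   # image left unchanged
    styled = style_transfer(image)                     # assumed external randomized style-transfer step
    if p_mode == "subregion":
        return patch_by_subregion(image, styled)
    return patch_by_pixels(image, styled)
```

In a training pipeline, such a function would typically be applied per image inside the data loader, e.g. `augmented = random_style_replacement(img, style_transfer=fast_style_net, p=0.5)`, where `fast_style_net` stands in for any fast randomized style-transfer model such as that of Jackson et al. [16].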
To avoid bias in data augmentation, we employed random style transfer to ensure diverse and uniform modifications across all image types, enhancing model generalization.

# B. Random Patch

Random patching modifies one image using patches from another. Here, we provide a detailed explanation of random patching by subregion. This method copies a randomly selected region from the style-transferred image onto the original image. Specifically, it randomly selects a rectangular region $I_{e}$ within the image and overwrites all of its pixels with those from the style-transferred image.

First, we determine the shape of the patching area $I_{e}$. Assume the training image has dimensions $W \times H$ and an area $S = W \times H$. We randomly initialize the area of the patched rectangular region to $S_{e}$, where $\frac{S_{e}}{S}$ falls within the range defined by the minimum $s_{l}$ and maximum $s_{h}$. Similarly, the aspect ratio of the rectangular region, denoted as $r_{e}$, is randomly chosen between $r_{l}$ and $r_{h}$. Given these, the dimensions of $I_{e}$ are computed as $H_{e} = \sqrt{S_{e} \times r_{e}}$ and $W_{e} = \sqrt{\frac{S_{e}}{r_{e}}}$.

Next, we randomly select a point $\mathcal{P} = (x_e, y_e)$ within $I$ to serve as the lower-left corner of $I_e$. If the selected region $I_e$ lies completely inside $I$ (i.e., $x_e + W_e \leq W$ and $y_e + H_e \leq H$), we define it as the selected rectangular region. Otherwise, we repeat the selection process until a valid $I_e$ is found. The whole procedure for selecting the rectangular region and applying the patch to the original image is illustrated in Alg. 2.

Algorithm 2: Random Patch by Subregion
Input: Input image $I$; utility image $I^{\prime}$; patched area ratio range $s_l$ and $s_h$; patched aspect ratio range $r_l$ and $r_h$
Output: Patched image $I^{*}$
1 $S_{e} \gets \mathrm{Rand}(s_{l},s_{h}) \times S$;
2 $r_e \gets \mathrm{Rand}(r_l,r_h)$;
3 $H_{e} \gets \sqrt{S_{e} \times r_{e}}$; $W_{e} \gets \sqrt{\frac{S_{e}}{r_{e}}}$;
4 while True do
5   $x_{e} \gets \mathrm{Rand}(0,W)$, $y_{e} \gets \mathrm{Rand}(0,H)$;
6   if $x_{e} + W_{e} \leq W$ and $y_{e} + H_{e} \leq H$ then
7     $I_{e} \gets (x_{e}, y_{e}, x_{e} + W_{e}, y_{e} + H_{e})$;
8     $I(I_e) \gets I'(I_e)$;
9     $I^{*} \gets I$;
10    return $I^{*}$;
11  end
12 end

# IV. EXPERIMENT

# A. Experiment Settings

As mentioned in previous sections, we evaluated our random style replacement method for image classification using the well-known STL-10 dataset [3]. To ensure the effectiveness and fairness of our evaluation, we set our experiment conditions mostly the same as [16, 17]. The benchmark networks we selected are ResNet18, ResNet50, ResNet101, and ResNet152 without pre-trained parameters [20].

In all experiments, instead of introducing more advanced optimizers or training procedures such as [21], we selected the Adam optimizer (momentum $\beta_{1} = 0.5$, $\beta_{2} = 0.999$, initial learning rate of 0.001) to align with the settings of other data augmentation methods [16, 17, 22]. For our style augmentation settings, we selected the style interpolation parameter $\alpha$ as 0.5 and an augmentation ratio of 1:1 [16]. All experiments were trained on an RTX 4090 for 100 epochs. We compared the following training-set configurations:

1) Original dataset without any advanced data augmentation techniques.
2) Dataset with naive data augmentation by simply copying and stacking the original dataset to match the same augmentation ratio as other groups, along with some routine augmentation operations.
+3) Dataset with random style replacement by subregion. +4) Dataset with random style replacement at the pixel level (with an independent probability $p = 0.5$ ). + +# B. Classification Evaluation + +We evaluated our proposed data augmentation technique on the STL-10 dataset, with only 5,000 training images and 8,000 test color images. After the augmentation process, as mentioned in previous sections, the size of the augmented training set will double to 10,000 and the corresponding augmentation treatment will be randomly applied accordingly. We applied the same training settings as prior work, whose effectiveness in strategy and hyperparameter selection, including learning rate, has already been verified. + +Our method achieved $81.6\%$ classification accuracy in just 100 training epochs, as shown in Fig. 2. This result is + +![](images/7e026a64b50dfe4054d7a664322f122990eca0662d52b4f950f3efd2e0769e15.jpg) +Fig. 2: Classification Accuracy of ResNets on STL-10 test set. "None" represents original dataset. "Naive" represents dataset with naive data augmentation by simply stacking the original dataset. "Pixel" represents dataset with random style replacement at the pixel level. "Subregion" represents dataset with random style replacement by subregion. + +both faster and more accurate than the $80.8\%$ accuracy after 100,000 epochs reported by Jackson et al. [16], highlighting our approach's efficiency and scalability. + +We also tested our data augmentation technique across various network architectures including ResNet18, ResNet50, ResNet101, and ResNet152, where it consistently outperformed others, demonstrating its robustness and versatility for a wide range of computer vision tasks. + +Furthermore, our findings support those of Zhong et al. [17], who found that erasing entire subregions is more effective than pixel-level erasing. Similarly, our data shows that random style replacement within subregions is a superior augmentation strategy, enhancing the training data's representational richness and contributing to faster model convergence and improved performance. This strategy maintains structural integrity and introduces variations that reflect the natural diversity of real-world datasets. + +![](images/54b898d8583a61f6a009a134ef5de79591a265e8ff8c71a6dae49cdf78b845e9.jpg) +Fig. 3: Loss of ResNets on STL-10 test set. "None" represents original dataset. "Naive" represents dataset with naive data augmentation by simply stacking the original dataset. "Pixel" represents dataset with random style replacement at the pixel level. "Subregion" represents dataset with random style replacement by subregion. + +![](images/fe8a4d9da32436ee4f701ac6a4674312ac9234f40e25d7c3bd7ea6a90ef3413b.jpg) + +To confirm the effectiveness of our data augmentation technique, we analyzed the test loss of each method on the STL-10 test set, as shown in Fig. 3. In contrast to the naive + +dataset, whose test loss stops converging after the 20th epoch, all augmented datasets show improved convergence speed and reduced loss variability. Notably, the style augmentation strategy that randomly replaces subregions achieves the fastest convergence and the most stable training process. Despite varying effectiveness in stabilizing training loss across ResNets, our method's performance remains consistently stable. + +# V. CONCLUSIONS + +In conclusion, our random style replacement strategy offers a practical and scalable data augmentation solution for the STL-10 dataset and beyond. 
By innovatively combining [16] and [17], our proposed data augmentation framework demonstrates superior performance and achieves faster convergence. By randomly replacing subregions rather than individual pixels, we preserve critical structural information while introducing meaningful variability, resulting in faster training convergence and higher accuracy. Our experiments with multiple ResNet architectures consistently verify the robustness of this method, showcasing its versatility for diverse computer vision applications. + +# VI. FUTURE WORK + +Our random style replacement method has shown promising results, yet further validation is needed to confirm its wider applicability. It is crucial to test this technique across various datasets and tasks to establish its generalizability and identify any limitations. Additionally, the convergence speed observations require confirmation through further experiments involving diverse datasets and network architectures. Moreover, integrating Large Language Models (LLMs) guided approaches [23] could enhance the method. These approaches would use LLMs to guide style replacement, potentially selecting optimal subregions for style transfer based on foreground and background information, thus enabling more meaningful and effective transformations. + +# REFERENCES + +[1] H.-C. Dan, Z. Huang, B. Lu, and M. Li, "Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework," Construction and Building Materials, 2024. +[2] P. Li et al., "Contextual hourglass network for semantic segmentation of high resolution aerial imagery," in ICECAI. IEEE, 2024, pp. 15-18. +[3] A. Coates, A. Ng, and H. Lee, "An analysis of single-layer networks in unsupervised feature learning," in PMLR, 2011, pp. 215-223. +[4] T. Hastie et al., "Unsupervised learning," The elements of statistical learning: Data mining, inference, and prediction, pp. 485-585, 2009. +[5] Q. Yang et al., "A comparative study on enhancing prediction in social network advertisement through data augmentation," in MLISE, 2024. +[6] K. Ding et al., "Data augmentation for deep graph learning: A survey," ACM SIGKDD Explorations Newsletter, vol. 24, no. 2, pp. 61-77, 2022. +[7] T. Kumar et al., "Image data augmentation approaches: A comprehensive survey and future directions," IEEE Access, 2024. +[8] M. Bayer et al., "Data augmentation in natural language processing: a novel text generation approach for long and short text classifiers," *IJMLC*, vol. 14, no. 1, p. 135-150, Apr. 2022. +[9] A. Sankar et al., "Self-supervised role learning for graph neural networks," Knowledge and Information Systems, 2022. +[10] K. Narang et al., “Ranking user-generated content via multi-relational graph convolution,” in SIGIR, 2021, pp. 470–480. + +[11] Y. Jin et al., "Representation and extraction of diesel engine maintenance knowledge graph with bidirectional relations based on bert and the bilstm-crf model," in ICEBE. IEEE, 2021, pp. 126-133. +[12] S. Li, H. Xu, and H. Chen, “Focused react: Improving react through reiterate and early stop,” in WiNLP Workshop, 2024. +[13] D. P. Kingma and M. Welling, "Auto-encoding variational bayes," arXiv preprint arXiv:1312.6114, 2013. +[14] I. J. Goodfellow et al., "Generative adversarial networks," arXiv, 2014. +[15] J. Ho et al., “Denoising diffusion probabilistic models,” Advances in neural information processing systems, vol. 33, pp. 6840–6851, 2020. +[16] P. T. 
Jackson et al., "Style augmentation: data augmentation via style randomization." in CVPR workshops, vol. 6, 2019, pp. 10-11. +[17] Z. Zhong et al., “Random erasing data augmentation,” in AAAI, vol. 34, no. 07, 2020, pp. 13001-13008. +[18] X. Xu et al., "Style transfer: From stitching to neural networks," in ICBASE. IEEE, 2024, pp. 526-530. +[19] Z. Ding, P. Li, Q. Yang, S. Li, and Q. Gong, “Regional style and color transfer,” in CVIDL. IEEE, 2024, pp. 593-597. +[20] K. He et al., “Deep residual learning for image recognition,” in CVPR, 2016, pp. 770–778. +[21] Q. Xu et al., "A stochastic gda method with backtracking for solving nonconvex (strongly) concave minimax problems," arXiv, 2024. +[22] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” arXiv preprint arXiv:1412.6980, 2014. +[23] Z. Ding, P. Li, Q. Yang, and S. Li, "Enhance image-to-image generation with llava-generated prompts," in ISPDS. IEEE, 2024, pp. 77-81. \ No newline at end of file diff --git a/data/2025/2504_10xxx/2504.10563/images/12af2ef443c3e140f0e94ec1a27d1d3fb16f508cdfa26628f5af80f4259f59f2.jpg b/data/2025/2504_10xxx/2504.10563/images/12af2ef443c3e140f0e94ec1a27d1d3fb16f508cdfa26628f5af80f4259f59f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19ae218f7c91f9d028430aae5718c4b9a3ecf15a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/12af2ef443c3e140f0e94ec1a27d1d3fb16f508cdfa26628f5af80f4259f59f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79a8512975427133e26760500762edf7cdbdf26fc9512a7e5226f485026b229e +size 11184 diff --git a/data/2025/2504_10xxx/2504.10563/images/395de3fb724960ffe2d001e45b4c0adb878d66ece621d43806a6d2e1bf672551.jpg b/data/2025/2504_10xxx/2504.10563/images/395de3fb724960ffe2d001e45b4c0adb878d66ece621d43806a6d2e1bf672551.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2f11e7a318360a3d872179eba6d939cbca26f06 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/395de3fb724960ffe2d001e45b4c0adb878d66ece621d43806a6d2e1bf672551.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfe436c186792710b44148ce6f0028392163bef25958264b8140faac3f24ab11 +size 13594 diff --git a/data/2025/2504_10xxx/2504.10563/images/54b898d8583a61f6a009a134ef5de79591a265e8ff8c71a6dae49cdf78b845e9.jpg b/data/2025/2504_10xxx/2504.10563/images/54b898d8583a61f6a009a134ef5de79591a265e8ff8c71a6dae49cdf78b845e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ad7b074c488636610d987903a037e11944b64ef --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/54b898d8583a61f6a009a134ef5de79591a265e8ff8c71a6dae49cdf78b845e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db61f0ea26ef207e59268a6dd749699e3037d43991ddbcd98e7484430f41b179 +size 18977 diff --git a/data/2025/2504_10xxx/2504.10563/images/5c95982de21f32060fcf87daeb2846c2993baf8fe7dc6d6dbed286d6a6f7d61c.jpg b/data/2025/2504_10xxx/2504.10563/images/5c95982de21f32060fcf87daeb2846c2993baf8fe7dc6d6dbed286d6a6f7d61c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9fdf891162d3f9918e18679e6a28aa5ab6e145d --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/5c95982de21f32060fcf87daeb2846c2993baf8fe7dc6d6dbed286d6a6f7d61c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44f19fd243b8e2101005d49a4843f5ae4aec9db9b346a1430521f1ac7ccd3066 +size 10535 diff --git 
a/data/2025/2504_10xxx/2504.10563/images/7e026a64b50dfe4054d7a664322f122990eca0662d52b4f950f3efd2e0769e15.jpg b/data/2025/2504_10xxx/2504.10563/images/7e026a64b50dfe4054d7a664322f122990eca0662d52b4f950f3efd2e0769e15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..712a65be593f7425c616759074caa07e43f69b6c --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/7e026a64b50dfe4054d7a664322f122990eca0662d52b4f950f3efd2e0769e15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dfba48ae880232d50a18e91e7eb3b025612e41556b65dd37f9d4ace54adf040 +size 20232 diff --git a/data/2025/2504_10xxx/2504.10563/images/9d8bb25c9129de4886dd493710bdc1ad34676fd2a94cf035d3c30cea079c87ff.jpg b/data/2025/2504_10xxx/2504.10563/images/9d8bb25c9129de4886dd493710bdc1ad34676fd2a94cf035d3c30cea079c87ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16f68d97a7fc7363635014054dded5b57d76d72a --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/9d8bb25c9129de4886dd493710bdc1ad34676fd2a94cf035d3c30cea079c87ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d649a9153fe07cce2d814475801f8618c6f1033e4c650b0eda95726b61a63ff +size 11643 diff --git a/data/2025/2504_10xxx/2504.10563/images/c1f4fc762f4606a8c1f77f5930372624fb1fef7cea8ef144d64d24a0642d444f.jpg b/data/2025/2504_10xxx/2504.10563/images/c1f4fc762f4606a8c1f77f5930372624fb1fef7cea8ef144d64d24a0642d444f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0355ede025dc5b744679781de3b06aaa43ec57ef --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/c1f4fc762f4606a8c1f77f5930372624fb1fef7cea8ef144d64d24a0642d444f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fabf024994bb3f3cf01f09ecd7c9543897e2041f43d29aef4657168aeda5360 +size 9905 diff --git a/data/2025/2504_10xxx/2504.10563/images/fe8a4d9da32436ee4f701ac6a4674312ac9234f40e25d7c3bd7ea6a90ef3413b.jpg b/data/2025/2504_10xxx/2504.10563/images/fe8a4d9da32436ee4f701ac6a4674312ac9234f40e25d7c3bd7ea6a90ef3413b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..226e886e52c8e207194c751d9d52327a75b9e281 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/images/fe8a4d9da32436ee4f701ac6a4674312ac9234f40e25d7c3bd7ea6a90ef3413b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:230149f4c60bbdedfef9f63ea7939f9fe13a835fceb0ac811bd54c5087f75416 +size 21153 diff --git a/data/2025/2504_10xxx/2504.10563/layout.json b/data/2025/2504_10xxx/2504.10563/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..73a5631158eee539e04bcc10885e094087f00d13 --- /dev/null +++ b/data/2025/2504_10xxx/2504.10563/layout.json @@ -0,0 +1,4884 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 90, + 57, + 522, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 57, + 522, + 111 + ], + "spans": [ + { + "bbox": [ + 90, + 57, + 522, + 111 + ], + "type": "text", + "content": "Data Augmentation Through Random Style Replacement" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 135, + 128, + 194, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 128, + 194, + 140 + ], + "spans": [ + { + "bbox": [ + 135, + 128, + 194, + 140 + ], + "type": "inline_equation", + "content": "1^{\\mathrm{st}}" + }, + { + "bbox": [ + 135, + 128, + 194, + 140 + ], + "type": "text", + "content": " Qikai Yang\\*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 60, + 140, + 269, + 149 
+ ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 140, + 269, + 149 + ], + "spans": [ + { + "bbox": [ + 60, + 140, + 269, + 149 + ], + "type": "text", + "content": "University of Illinois Urbana-Champaign, Urbana, USA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 149, + 248, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 149, + 248, + 160 + ], + "spans": [ + { + "bbox": [ + 81, + 149, + 248, + 160 + ], + "type": "text", + "content": "*Corresponding Author: qikaiy2@illinois.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 170, + 197, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 170, + 197, + 181 + ], + "spans": [ + { + "bbox": [ + 131, + 170, + 197, + 181 + ], + "type": "inline_equation", + "content": "3^{\\text{th}}" + }, + { + "bbox": [ + 131, + 170, + 197, + 181 + ], + "type": "text", + "content": " Huaiying Luo" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 181, + 224, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 181, + 224, + 191 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 224, + 191 + ], + "type": "text", + "content": "Cornell University, Ithaca, USA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 418, + 128, + 467, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 418, + 128, + 467, + 139 + ], + "spans": [ + { + "bbox": [ + 418, + 128, + 467, + 139 + ], + "type": "inline_equation", + "content": "2^{\\mathrm{nd}}" + }, + { + "bbox": [ + 418, + 128, + 467, + 139 + ], + "type": "text", + "content": " Cheng Ji" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 339, + 140, + 547, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 140, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 339, + 140, + 547, + 149 + ], + "type": "text", + "content": "University of Illinois Urbana-Champaign, Urbana, USA" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 414, + 170, + 470, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 170, + 470, + 181 + ], + "spans": [ + { + "bbox": [ + 414, + 170, + 470, + 181 + ], + "type": "inline_equation", + "content": "4^{\\mathrm{th}}" + }, + { + "bbox": [ + 414, + 170, + 470, + 181 + ], + "type": "text", + "content": " Panfeng Li" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 365, + 181, + 520, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 181, + 520, + 191 + ], + "spans": [ + { + "bbox": [ + 365, + 181, + 520, + 191 + ], + "type": "text", + "content": "University of Michigan, Ann Arbor, USA" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 268, + 201, + 339, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 201, + 339, + 212 + ], + "spans": [ + { + "bbox": [ + 268, + 201, + 339, + 212 + ], + "type": "inline_equation", + "content": "5^{\\mathrm{th}}" + }, + { + "bbox": [ + 268, + 201, + 339, + 212 + ], + "type": "text", + "content": " Zhicheng Ding" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 232, + 212, + 375, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 212, + 375, + 221 + ], + "spans": [ + { + "bbox": [ + 232, + 212, + 375, + 221 + ], + "type": "text", + "content": "Columbia University, New York, USA" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 248, + 301, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 248, + 301, + 388 + ], 
+ "spans": [ + { + "bbox": [ + 45, + 248, + 301, + 388 + ], + "type": "text", + "content": "Abstract-In this paper, we introduce a novel data augmentation technique that combines the advantages of style augmentation and random erasing by selectively replacing image subregions with style-transferred patches. Our approach first applies a random style transfer to training images, then randomly substitutes selected areas of these images with patches derived from the style-transferred versions. This method is able to seamlessly accommodate a wide range of existing style transfer algorithms and can be readily integrated into diverse data augmentation pipelines. By incorporating our strategy, the training process becomes more robust and less prone to overfitting. Comparative experiments demonstrate that, relative to previous style augmentation methods, our technique achieves superior performance and faster convergence." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 45, + 388, + 300, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 388, + 300, + 409 + ], + "spans": [ + { + "bbox": [ + 45, + 388, + 300, + 409 + ], + "type": "text", + "content": "Index Terms—Data Augmentation, Style Transfer, Style Augmentation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 133, + 422, + 214, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 422, + 214, + 432 + ], + "spans": [ + { + "bbox": [ + 133, + 422, + 214, + 432 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 45, + 438, + 300, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 438, + 300, + 569 + ], + "spans": [ + { + "bbox": [ + 45, + 438, + 300, + 569 + ], + "type": "text", + "content": "Recent advancements in deep learning have driven significant progress in a wide range of computer vision tasks, including image classification, object detection, and semantic segmentation [1, 2]. Despite these advances, many of these tasks continue to face a fundamental bottleneck: a lack of sufficient labeled data [3, 4]. Annotating large-scale datasets is both time-consuming and costly, which can limit the applicability of deep neural networks in specialized or rapidly evolving domains. To mitigate this issue, data augmentation techniques are heavily utilized, artificially expanding and diversifying the training set so that models generalize more effectively." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 45, + 571, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 571, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 571, + 301, + 727 + ], + "type": "text", + "content": "Data augmentation [5] has garnered significant attention in supervised learning research across a wide range of domains—including computer vision [6, 7], natural language processing [8], graph learning [9, 10], and large language model [11, 12] — due to its ability to increase both the volume and diversity of training data, thereby enhancing model generalization and mitigating overfitting. 
Broadly, data augmentation strategies can be grouped into two categories: generative methods, which utilize models like Variational Autoencoders (VAEs) [13], Generative Adversarial Networks (GANs) [14], Large Language Models (LLMs), or diffusion-based frameworks [15] to synthesize new data; and traditional methods, which rely on transformations such as random cropping, flip" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 310, + 243, + 360, + 360 + ], + "blocks": [ + { + "bbox": [ + 310, + 243, + 360, + 360 + ], + "lines": [ + { + "bbox": [ + 310, + 243, + 360, + 360 + ], + "spans": [ + { + "bbox": [ + 310, + 243, + 360, + 360 + ], + "type": "image", + "image_path": "5c95982de21f32060fcf87daeb2846c2993baf8fe7dc6d6dbed286d6a6f7d61c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 365, + 341, + 374 + ], + "lines": [ + { + "bbox": [ + 329, + 365, + 341, + 374 + ], + "spans": [ + { + "bbox": [ + 329, + 365, + 341, + 374 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 361, + 244, + 410, + 360 + ], + "blocks": [ + { + "bbox": [ + 361, + 244, + 410, + 360 + ], + "lines": [ + { + "bbox": [ + 361, + 244, + 410, + 360 + ], + "spans": [ + { + "bbox": [ + 361, + 244, + 410, + 360 + ], + "type": "image", + "image_path": "c1f4fc762f4606a8c1f77f5930372624fb1fef7cea8ef144d64d24a0642d444f.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 365, + 392, + 374 + ], + "lines": [ + { + "bbox": [ + 380, + 365, + 392, + 374 + ], + "spans": [ + { + "bbox": [ + 380, + 365, + 392, + 374 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 412, + 244, + 462, + 360 + ], + "blocks": [ + { + "bbox": [ + 412, + 244, + 462, + 360 + ], + "lines": [ + { + "bbox": [ + 412, + 244, + 462, + 360 + ], + "spans": [ + { + "bbox": [ + 412, + 244, + 462, + 360 + ], + "type": "image", + "image_path": "395de3fb724960ffe2d001e45b4c0adb878d66ece621d43806a6d2e1bf672551.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 365, + 443, + 374 + ], + "lines": [ + { + "bbox": [ + 432, + 365, + 443, + 374 + ], + "spans": [ + { + "bbox": [ + 432, + 365, + 443, + 374 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 463, + 244, + 512, + 360 + ], + "blocks": [ + { + "bbox": [ + 463, + 244, + 512, + 360 + ], + "lines": [ + { + "bbox": [ + 463, + 244, + 512, + 360 + ], + "spans": [ + { + "bbox": [ + 463, + 244, + 512, + 360 + ], + "type": "image", + "image_path": "12af2ef443c3e140f0e94ec1a27d1d3fb16f508cdfa26628f5af80f4259f59f2.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 482, + 365, + 493, + 374 + ], + "lines": [ + { + "bbox": [ + 482, + 365, + 493, + 374 + ], + "spans": [ + { + "bbox": [ + 482, + 365, + 493, + 374 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 380, + 564, + 440 + ], + "lines": [ + { + "bbox": [ + 308, + 380, + 564, + 440 + ], + "spans": [ + { + "bbox": [ + 308, + 380, + 564, + 440 + ], + "type": "text", + "content": "Fig. 
1: Examples of random style transfer: we generate a style-transferred image and use it to patch original image in different ways (a) Input Image. (b) Random-Region-Erased Image. (c) Style Transfer. (d) Random Style Replacement. (e) Random Style Replacement." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 515, + 244, + 563, + 360 + ], + "blocks": [ + { + "bbox": [ + 515, + 244, + 563, + 360 + ], + "lines": [ + { + "bbox": [ + 515, + 244, + 563, + 360 + ], + "spans": [ + { + "bbox": [ + 515, + 244, + 563, + 360 + ], + "type": "image", + "image_path": "9d8bb25c9129de4886dd493710bdc1ad34676fd2a94cf035d3c30cea079c87ff.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 533, + 365, + 544, + 374 + ], + "lines": [ + { + "bbox": [ + 533, + 365, + 544, + 374 + ], + "spans": [ + { + "bbox": [ + 533, + 365, + 544, + 374 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 472, + 564, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 472, + 564, + 663 + ], + "spans": [ + { + "bbox": [ + 307, + 472, + 564, + 663 + ], + "type": "text", + "content": "ping, rotations, color jittering, and histogram equalization to modify existing samples. While both approaches aim to expose the model to a wider variety of conditions, thus reducing overfitting, traditional augmentation strategies may not fully capture the complexity of real-world variability. Consequently, several studies have explored more refined methods, including style augmentation and random erasing [16-18]. Style augmentation employs style transfer [19] to alter the visual attributes of training images while preserving their semantic content, thereby increasing robustness to differences in texture, color, and contrast. Random erasing, on the other hand, randomly occludes or replaces subregions of an image, making models more resilient to missing or corrupted information. In this paper, we revisit these traditional approaches—particularly focusing on their potential to advance the efficacy of data augmentation in supervised learning." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 666, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 666, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 307, + 666, + 564, + 727 + ], + "type": "text", + "content": "In this paper, we introduce a novel data augmentation method that merges style augmentation with random erasing. Our approach involves applying a random style transfer to an image, followed by replacing specific subregions with patches from the style-transferred version. 
This technique enhances" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 221, + 35, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 221, + 35, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 221, + 35, + 568 + ], + "type": "text", + "content": "arXiv:2504.10563v2 [cs.CV] 18 Jun 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 56, + 300, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 56, + 300, + 104 + ], + "spans": [ + { + "bbox": [ + 45, + 56, + 300, + 104 + ], + "type": "text", + "content": "robustness against style variations and occlusion-like effects. It integrates smoothly with existing style transfer frameworks and fits easily into standard data augmentation pipelines, as illustrated in Figure 1." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 105, + 264, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 105, + 264, + 116 + ], + "spans": [ + { + "bbox": [ + 55, + 105, + 264, + 116 + ], + "type": "text", + "content": "The main contributions of our work are as below:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 119, + 301, + 239 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 53, + 119, + 301, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 119, + 301, + 155 + ], + "spans": [ + { + "bbox": [ + 53, + 119, + 301, + 155 + ], + "type": "text", + "content": "1) We propose a technique that merges style augmentation and random erasing, offering benefits from both texture variation and structured occlusion." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 155, + 301, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 155, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 52, + 155, + 301, + 239 + ], + "type": "text", + "content": "2) We demonstrate through experiments that our approach reduces the risk of overfitting while achieving faster convergence compared to established style augmentation methods. Ease of Integration: Our strategy is parameter-free and can be readily adapted to a broad spectrum of computer vision tasks, making it a highly practical solution for data augmentation." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 45, + 241, + 301, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 241, + 301, + 289 + ], + "spans": [ + { + "bbox": [ + 45, + 241, + 301, + 289 + ], + "type": "text", + "content": "By leveraging this new augmentation method, we observe notable gains in model performance across different tasks, highlighting its potential to address the persistent challenge of limited labeled data in computer vision research." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 144, + 298, + 202, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 298, + 202, + 309 + ], + "spans": [ + { + "bbox": [ + 144, + 298, + 202, + 309 + ], + "type": "text", + "content": "II. 
DATASET" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 315, + 300, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 315, + 300, + 422 + ], + "spans": [ + { + "bbox": [ + 45, + 315, + 300, + 422 + ], + "type": "text", + "content": "We tested our random style replacement method on the STL-10 dataset, which includes 5,000 training images and 8,000 test images, each with a resolution of " + }, + { + "bbox": [ + 45, + 315, + 300, + 422 + ], + "type": "inline_equation", + "content": "96 \\times 96" + }, + { + "bbox": [ + 45, + 315, + 300, + 422 + ], + "type": "text", + "content": " pixels across 10 classes [3]. We chose STL-10 due to its complex backgrounds and high resolution, which pose a substantial challenge for image classification, making it a robust benchmark. Additionally, the limited size of the training set highlights the effectiveness of our data augmentation technique in enhancing training data." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 432, + 206, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 432, + 206, + 443 + ], + "spans": [ + { + "bbox": [ + 140, + 432, + 206, + 443 + ], + "type": "text", + "content": "III. METHODS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 449, + 300, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 449, + 300, + 486 + ], + "spans": [ + { + "bbox": [ + 45, + 449, + 300, + 486 + ], + "type": "text", + "content": "This sections introduces our random style replacement method in details. We described the overall process of random style replacement and explain how we perform image patch." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 495, + 175, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 495, + 175, + 506 + ], + "spans": [ + { + "bbox": [ + 45, + 495, + 175, + 506 + ], + "type": "text", + "content": "A. Random Style Replacement" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "text", + "content": "During training, random style replacement is applied with a certain probability " + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "text", + "content": ": for each image " + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "text", + "content": " in a mini-batch, there's a probability " + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "text", + "content": " that it undergoes style replacement and a probability " + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "inline_equation", + "content": "1 - p" + }, + { + "bbox": [ + 45, + 510, + 300, + 558 + ], + "type": "text", + "content": " that it remains unchanged." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 559, + 301, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 559, + 301, + 618 + ], + "spans": [ + { + "bbox": [ + 45, + 559, + 301, + 618 + ], + "type": "text", + "content": "If selected, the image will be transformed into a new version with a partial style change. This random style replacement process consists of two steps: generating a complete-style-transferred image and merging it with the original image by certain patching methods. The procedure is shown in Alg. 1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 618, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 618, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 45, + 618, + 301, + 727 + ], + "type": "text", + "content": "Style transfer refers to a class of image processing algorithms that alter the visual style of an image while preserving its semantic content. For style transfer to be part of a data augmentation technique, it needs to be a both fast and random algorithm capable of applying a broad range of styles. Therefore, we adopt the approach of Jackson et al., which efficiently generates a completely style-transferred image by incorporating randomness on the fly without requiring heavy computations [16]." + } + ] + } + ], + "index": 13 + }, + { + "type": "code", + "bbox": [ + 310, + 70, + 485, + 228 + ], + "blocks": [ + { + "bbox": [ + 315, + 57, + 534, + 68 + ], + "lines": [ + { + "bbox": [ + 315, + 57, + 534, + 68 + ], + "spans": [ + { + "bbox": [ + 315, + 57, + 534, + 68 + ], + "type": "text", + "content": "Algorithm 1: Random Style Replacement Procedure" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "lines": [ + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "spans": [ + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": "Input: Input image " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " Augmentation probability " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " Patch mode pMode; Output: Augmented image " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "I^{*}" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " Initialization: " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "p_1\\gets \\mathrm{Rand}(0,1)" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " 1 if " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "p_1\\geq p" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " then 2 " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "I^{*}\\gets I" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " 3 return " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "I^{*}" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " 4 else 5 " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": 
"I^{\\prime}\\gets" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " randomStyleTransfer(I); 6 " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "I^{*}\\gets" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " randomPatch(I,I',pMode); 7 return " + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "inline_equation", + "content": "I^{*}" + }, + { + "bbox": [ + 310, + 70, + 485, + 228 + ], + "type": "text", + "content": " 8 end" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "code_body" + } + ], + "index": 15, + "sub_type": "algorithm" + }, + { + "bbox": [ + 308, + 258, + 564, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 258, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 308, + 258, + 564, + 354 + ], + "type": "text", + "content": "The generated style-transferred image will then be used to patch the original image, creating an augmented image. There are multiple patching methods, and we adopt the two most common ones: patching by a random subregion and patching randomly selecting individual pixels. To avoid bias in data augmentation, we employed random style transferring to ensure diverse and uniform modifications across all image types, enhancing model generalization." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 364, + 385, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 364, + 385, + 374 + ], + "spans": [ + { + "bbox": [ + 309, + 364, + 385, + 374 + ], + "type": "text", + "content": "B. Random Patch" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 380, + 563, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 380, + 563, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 380, + 563, + 464 + ], + "type": "text", + "content": "Random patch is to patch a image based on another image. Here, we provided a detailed explanation of random patch by subregion. This method copies a randomly selected region from the style-transferred image onto the original image. Specifically, it randomly selects a rectangle region " + }, + { + "bbox": [ + 307, + 380, + 563, + 464 + ], + "type": "inline_equation", + "content": "I_{e}" + }, + { + "bbox": [ + 307, + 380, + 563, + 464 + ], + "type": "text", + "content": " within the image and overwrite all its pixels with those from the style-transferred image." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "spans": [ + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": "Firstly we will determine the shape of the patching area " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "I_{e}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": ". Assume the training image has dimensions " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "W \\times H" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": " and an area " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "S = W \\times H" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": ". 
We randomly initialize the area of the patched rectangle region to " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "S_{e}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "\\frac{S_{e}}{S}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": " falls within the range defined by the minimum " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "s_{l}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": " and maximum " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "s_{h}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": ". Similarly, the aspect ratio of the rectangle region, denoted as " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "r_{e}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": ", is randomly chosen between " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "r_{l}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "r_{h}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": ". Given those, the dimensions of " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "I_{e}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": " are computed as " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "H_{e} = \\sqrt{S_{e} \\times r_{e}}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "inline_equation", + "content": "W_{e} = \\sqrt{\\frac{S_{e}}{r_{e}}}" + }, + { + "bbox": [ + 308, + 464, + 563, + 564 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "spans": [ + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": "Next, we randomly select a point " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{P} = (x_e, y_e)" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": " within " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": " to serve as the lower-left corner of " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "I_e" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": ". 
If the selected region " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "I_e" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": " are completely inside " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": " (i.e. " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "x_e + W_e \\leq W" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "y_e + H_e \\leq H" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": "), we define it as the selected rectangular region. Otherwise, we repeat the selection process until a valid " + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "inline_equation", + "content": "I_e" + }, + { + "bbox": [ + 307, + 563, + 564, + 647 + ], + "type": "text", + "content": " is found. The whole procedure for selecting the rectangular region and applying the patch to original image is illustrated in Alg. 2." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 397, + 657, + 474, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 657, + 474, + 667 + ], + "spans": [ + { + "bbox": [ + 397, + 657, + 474, + 667 + ], + "type": "text", + "content": "IV. EXPERIMENT" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 673, + 408, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 673, + 408, + 685 + ], + "spans": [ + { + "bbox": [ + 308, + 673, + 408, + 685 + ], + "type": "text", + "content": "A. Experiment Settings" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 689, + 564, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 564, + 726 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 564, + 726 + ], + "type": "text", + "content": "As mentioned in previous sections, we evaluated our random style replacement method for image classification using the well-known STL-10 dataset [3]. 
To ensure the effectiveness" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 44, + 71, + 249, + 282 + ], + "blocks": [ + { + "bbox": [ + 51, + 57, + 231, + 69 + ], + "lines": [ + { + "bbox": [ + 51, + 57, + 231, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 231, + 69 + ], + "type": "text", + "content": "Algorithm 2: Random Patch by Subregion" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "lines": [ + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "spans": [ + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": "Input: Input image " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " Utility image " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I^{\\prime}" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " Patched area ratio range " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "s_l" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "s_h" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " Patched aspect ratio range " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "r_l" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "r_h" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " Output: Patched image " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I^{*}" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n1 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "S_{e}\\gets \\mathrm{Rand}(s_{l},s_{h})\\times S;" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n2 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "r_e\\gets \\mathrm{Rand}(r_l,r_h)" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n3 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "H_{e}\\leftarrow \\sqrt{S_{e}\\times r_{e}}" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "W_{e}\\leftarrow \\sqrt{\\frac{S_{e}}{r_{e}}};" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n4 while True do \n5 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "x_{e}\\gets \\mathrm{Rand}(0,W),y_{e}\\gets \\mathrm{Rand}(0,H);" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "x_{e} + W_{e}\\leq W" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": 
"inline_equation", + "content": "y_{e} + H_{e}\\leq H" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " then \n7 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I_{e}\\gets (x_{e},y_{e},x_{e} + W_{e},y_{e} + H_{e});" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n8 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I(I_e)\\gets I'(I_e);" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n9 " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I^{*}\\gets I" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " return " + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "inline_equation", + "content": "I^{*}" + }, + { + "bbox": [ + 44, + 71, + 249, + 282 + ], + "type": "text", + "content": " \n10 end \n12 end" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 45, + 312, + 301, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 312, + 301, + 361 + ], + "spans": [ + { + "bbox": [ + 45, + 312, + 301, + 361 + ], + "type": "text", + "content": "and fairness of our evaluation, we set our experiment conditions mostly the same as [16, 17]. The benchmark networks we selected are ResNet18, ResNet50, ResNet101 and ResNet152 without pre-trained parameters [20]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "spans": [ + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "text", + "content": "In all experiments, instead of introducing more advanced optimizers or training procedures such as [21], we selected the Adam optimizer (momentum " + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.5" + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "text", + "content": ", initial learning rate of 0.001) to align with the settings of other data augmentation methods [16, 17, 22]. For our setting of the style augmentation parameter, we selected the style interpolation parameter " + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 361, + 300, + 456 + ], + "type": "text", + "content": " as 0.5 and augmentation ratio as 1:1 [16]. All experiments are trained on RTX 4090 with 100 epochs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 459, + 300, + 567 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 53, + 459, + 299, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 459, + 299, + 483 + ], + "spans": [ + { + "bbox": [ + 53, + 459, + 299, + 483 + ], + "type": "text", + "content": "1) Original dataset without any advanced data augmentation techniques." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 483, + 300, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 483, + 300, + 530 + ], + "spans": [ + { + "bbox": [ + 52, + 483, + 300, + 530 + ], + "type": "text", + "content": "2) Dataset with naive data augmentation by simply copying and stacking the original dataset to match the same augmentation ratio as other groups, along with some routine augmentation operations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 531, + 287, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 531, + 287, + 542 + ], + "spans": [ + { + "bbox": [ + 52, + 531, + 287, + 542 + ], + "type": "text", + "content": "3) Dataset with random style replacement by subregion." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 543, + 299, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 543, + 299, + 567 + ], + "spans": [ + { + "bbox": [ + 52, + 543, + 299, + 567 + ], + "type": "text", + "content": "4) Dataset with random style replacement at the pixel level (with an independent probability " + }, + { + "bbox": [ + 52, + 543, + 299, + 567 + ], + "type": "inline_equation", + "content": "p = 0.5" + }, + { + "bbox": [ + 52, + 543, + 299, + 567 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 45, + 577, + 165, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 577, + 165, + 589 + ], + "spans": [ + { + "bbox": [ + 45, + 577, + 165, + 589 + ], + "type": "text", + "content": "B. Classification Evaluation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 594, + 300, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 594, + 300, + 700 + ], + "spans": [ + { + "bbox": [ + 45, + 594, + 300, + 700 + ], + "type": "text", + "content": "We evaluated our proposed data augmentation technique on the STL-10 dataset, with only 5,000 training images and 8,000 test color images. After the augmentation process, as mentioned in previous sections, the size of the augmented training set will double to 10,000 and the corresponding augmentation treatment will be randomly applied accordingly. We applied the same training settings as prior work, whose effectiveness in strategy and hyperparameter selection, including learning rate, has already been verified." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 702, + 301, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 702, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 46, + 702, + 301, + 727 + ], + "type": "text", + "content": "Our method achieved " + }, + { + "bbox": [ + 46, + 702, + 301, + 727 + ], + "type": "inline_equation", + "content": "81.6\\%" + }, + { + "bbox": [ + 46, + 702, + 301, + 727 + ], + "type": "text", + "content": " classification accuracy in just 100 training epochs, as shown in Fig. 2. 
This result is" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 322, + 53, + 550, + 167 + ], + "blocks": [ + { + "bbox": [ + 322, + 53, + 550, + 167 + ], + "lines": [ + { + "bbox": [ + 322, + 53, + 550, + 167 + ], + "spans": [ + { + "bbox": [ + 322, + 53, + 550, + 167 + ], + "type": "image", + "image_path": "7e026a64b50dfe4054d7a664322f122990eca0662d52b4f950f3efd2e0769e15.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 171, + 564, + 244 + ], + "lines": [ + { + "bbox": [ + 307, + 171, + 564, + 244 + ], + "spans": [ + { + "bbox": [ + 307, + 171, + 564, + 244 + ], + "type": "text", + "content": "Fig. 2: Classification Accuracy of ResNets on STL-10 test set. \"None\" represents original dataset. \"Naive\" represents dataset with naive data augmentation by simply stacking the original dataset. \"Pixel\" represents dataset with random style replacement at the pixel level. \"Subregion\" represents dataset with random style replacement by subregion." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 265, + 563, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 265, + 563, + 300 + ], + "spans": [ + { + "bbox": [ + 307, + 265, + 563, + 300 + ], + "type": "text", + "content": "both faster and more accurate than the " + }, + { + "bbox": [ + 307, + 265, + 563, + 300 + ], + "type": "inline_equation", + "content": "80.8\\%" + }, + { + "bbox": [ + 307, + 265, + 563, + 300 + ], + "type": "text", + "content": " accuracy after 100,000 epochs reported by Jackson et al. [16], highlighting our approach's efficiency and scalability." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 300, + 563, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 300, + 563, + 361 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 563, + 361 + ], + "type": "text", + "content": "We also tested our data augmentation technique across various network architectures including ResNet18, ResNet50, ResNet101, and ResNet152, where it consistently outperformed others, demonstrating its robustness and versatility for a wide range of computer vision tasks." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 360, + 564, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 360, + 564, + 467 + ], + "spans": [ + { + "bbox": [ + 307, + 360, + 564, + 467 + ], + "type": "text", + "content": "Furthermore, our findings support those of Zhong et al. [17], who found that erasing entire subregions is more effective than pixel-level erasing. Similarly, our data shows that random style replacement within subregions is a superior augmentation strategy, enhancing the training data's representational richness and contributing to faster model convergence and improved performance. This strategy maintains structural integrity and introduces variations that reflect the natural diversity of real-world datasets." 
+ } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 310, + 475, + 435, + 599 + ], + "blocks": [ + { + "bbox": [ + 310, + 475, + 435, + 599 + ], + "lines": [ + { + "bbox": [ + 310, + 475, + 435, + 599 + ], + "spans": [ + { + "bbox": [ + 310, + 475, + 435, + 599 + ], + "type": "image", + "image_path": "54b898d8583a61f6a009a134ef5de79591a265e8ff8c71a6dae49cdf78b845e9.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 604, + 564, + 676 + ], + "lines": [ + { + "bbox": [ + 307, + 604, + 564, + 676 + ], + "spans": [ + { + "bbox": [ + 307, + 604, + 564, + 676 + ], + "type": "text", + "content": "Fig. 3: Loss of ResNets on STL-10 test set. \"None\" represents original dataset. \"Naive\" represents dataset with naive data augmentation by simply stacking the original dataset. \"Pixel\" represents dataset with random style replacement at the pixel level. \"Subregion\" represents dataset with random style replacement by subregion." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 437, + 475, + 561, + 599 + ], + "blocks": [ + { + "bbox": [ + 437, + 475, + 561, + 599 + ], + "lines": [ + { + "bbox": [ + 437, + 475, + 561, + 599 + ], + "spans": [ + { + "bbox": [ + 437, + 475, + 561, + 599 + ], + "type": "image", + "image_path": "fe8a4d9da32436ee4f701ac6a4674312ac9234f40e25d7c3bd7ea6a90ef3413b.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 689, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 564, + 727 + ], + "type": "text", + "content": "To confirm the effectiveness of our data augmentation technique, we analyzed the test loss of each method on the STL-10 test set, as shown in Fig. 3. In contrast to the naive" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 57, + 301, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 57, + 301, + 140 + ], + "spans": [ + { + "bbox": [ + 45, + 57, + 301, + 140 + ], + "type": "text", + "content": "dataset, whose test loss stops converging after the 20th epoch, all augmented datasets show improved convergence speed and reduced loss variability. Notably, the style augmentation strategy that randomly replaces subregions achieves the fastest convergence and the most stable training process. Despite varying effectiveness in stabilizing training loss across ResNets, our method's performance remains consistently stable." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 133, + 151, + 214, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 151, + 214, + 162 + ], + "spans": [ + { + "bbox": [ + 133, + 151, + 214, + 162 + ], + "type": "text", + "content": "V. CONCLUSIONS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 168, + 301, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 168, + 301, + 312 + ], + "spans": [ + { + "bbox": [ + 45, + 168, + 301, + 312 + ], + "type": "text", + "content": "In conclusion, our random style replacement strategy offers a practical and scalable data augmentation solution for the STL-10 dataset and beyond. 
By innovatively combining [16] and [17], our proposed data augmentation framework demonstrates superior performance and achieves faster convergence. By randomly replacing subregions rather than individual pixels, we preserve critical structural information while introducing meaningful variability, resulting in faster training convergence and higher accuracy. Our experiments with multiple ResNet architectures consistently verify the robustness of this method, showcasing its versatility for diverse computer vision applications." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 322, + 217, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 322, + 217, + 333 + ], + "spans": [ + { + "bbox": [ + 129, + 322, + 217, + 333 + ], + "type": "text", + "content": "VI. FUTURE WORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 340, + 301, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 340, + 301, + 495 + ], + "spans": [ + { + "bbox": [ + 45, + 340, + 301, + 495 + ], + "type": "text", + "content": "Our random style replacement method has shown promising results, yet further validation is needed to confirm its wider applicability. It is crucial to test this technique across various datasets and tasks to establish its generalizability and identify any limitations. Additionally, the convergence speed observations require confirmation through further experiments involving diverse datasets and network architectures. Moreover, integrating Large Language Models (LLMs) guided approaches [23] could enhance the method. These approaches would use LLMs to guide style replacement, potentially selecting optimal subregions for style transfer based on foreground and background information, thus enabling more meaningful and effective transformations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 144, + 504, + 203, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 504, + 203, + 514 + ], + "spans": [ + { + "bbox": [ + 144, + 504, + 203, + 514 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 518, + 299, + 725 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 52, + 518, + 299, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 518, + 299, + 554 + ], + "spans": [ + { + "bbox": [ + 52, + 518, + 299, + 554 + ], + "type": "text", + "content": "[1] H.-C. Dan, Z. Huang, B. Lu, and M. Li, \"Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework,\" Construction and Building Materials, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 554, + 299, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 554, + 299, + 572 + ], + "spans": [ + { + "bbox": [ + 52, + 554, + 299, + 572 + ], + "type": "text", + "content": "[2] P. Li et al., \"Contextual hourglass network for semantic segmentation of high resolution aerial imagery,\" in ICECAI. IEEE, 2024, pp. 15-18." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 572, + 299, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 572, + 299, + 590 + ], + "spans": [ + { + "bbox": [ + 51, + 572, + 299, + 590 + ], + "type": "text", + "content": "[3] A. Coates, A. Ng, and H. 
Lee, \"An analysis of single-layer networks in unsupervised feature learning,\" in PMLR, 2011, pp. 215-223." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 590, + 299, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 590, + 299, + 608 + ], + "spans": [ + { + "bbox": [ + 51, + 590, + 299, + 608 + ], + "type": "text", + "content": "[4] T. Hastie et al., \"Unsupervised learning,\" The elements of statistical learning: Data mining, inference, and prediction, pp. 485-585, 2009." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 608, + 299, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 608, + 299, + 626 + ], + "spans": [ + { + "bbox": [ + 51, + 608, + 299, + 626 + ], + "type": "text", + "content": "[5] Q. Yang et al., \"A comparative study on enhancing prediction in social network advertisement through data augmentation,\" in MLISE, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 626, + 299, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 626, + 299, + 644 + ], + "spans": [ + { + "bbox": [ + 51, + 626, + 299, + 644 + ], + "type": "text", + "content": "[6] K. Ding et al., \"Data augmentation for deep graph learning: A survey,\" ACM SIGKDD Explorations Newsletter, vol. 24, no. 2, pp. 61-77, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 644, + 299, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 644, + 299, + 662 + ], + "spans": [ + { + "bbox": [ + 51, + 644, + 299, + 662 + ], + "type": "text", + "content": "[7] T. Kumar et al., \"Image data augmentation approaches: A comprehensive survey and future directions,\" IEEE Access, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 662, + 299, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 662, + 299, + 689 + ], + "spans": [ + { + "bbox": [ + 51, + 662, + 299, + 689 + ], + "type": "text", + "content": "[8] M. Bayer et al., \"Data augmentation in natural language processing: a novel text generation approach for long and short text classifiers,\" *IJMLC*, vol. 14, no. 1, p. 135-150, Apr. 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 689, + 299, + 707 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 689, + 299, + 707 + ], + "spans": [ + { + "bbox": [ + 51, + 689, + 299, + 707 + ], + "type": "text", + "content": "[9] A. Sankar et al., \"Self-supervised role learning for graph neural networks,\" Knowledge and Information Systems, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 708, + 299, + 725 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 708, + 299, + 725 + ], + "spans": [ + { + "bbox": [ + 47, + 708, + 299, + 725 + ], + "type": "text", + "content": "[10] K. Narang et al., “Ranking user-generated content via multi-relational graph convolution,” in SIGIR, 2021, pp. 470–480." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 58, + 563, + 293 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 310, + 58, + 563, + 85 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 58, + 563, + 85 + ], + "spans": [ + { + "bbox": [ + 310, + 58, + 563, + 85 + ], + "type": "text", + "content": "[11] Y. 
Jin et al., \"Representation and extraction of diesel engine maintenance knowledge graph with bidirectional relations based on bert and the bilstm-crf model,\" in ICEBE. IEEE, 2021, pp. 126-133." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 85, + 563, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 85, + 563, + 103 + ], + "spans": [ + { + "bbox": [ + 310, + 85, + 563, + 103 + ], + "type": "text", + "content": "[12] S. Li, H. Xu, and H. Chen, “Focused react: Improving react through reiterate and early stop,” in WiNLP Workshop, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 103, + 563, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 103, + 563, + 121 + ], + "spans": [ + { + "bbox": [ + 310, + 103, + 563, + 121 + ], + "type": "text", + "content": "[13] D. P. Kingma and M. Welling, \"Auto-encoding variational bayes,\" arXiv preprint arXiv:1312.6114, 2013." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 121, + 563, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 121, + 563, + 130 + ], + "spans": [ + { + "bbox": [ + 310, + 121, + 563, + 130 + ], + "type": "text", + "content": "[14] I. J. Goodfellow et al., \"Generative adversarial networks,\" arXiv, 2014." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 130, + 563, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 130, + 563, + 148 + ], + "spans": [ + { + "bbox": [ + 310, + 130, + 563, + 148 + ], + "type": "text", + "content": "[15] J. Ho et al., “Denoising diffusion probabilistic models,” Advances in neural information processing systems, vol. 33, pp. 6840–6851, 2020." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 148, + 563, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 148, + 563, + 166 + ], + "spans": [ + { + "bbox": [ + 310, + 148, + 563, + 166 + ], + "type": "text", + "content": "[16] P. T. Jackson et al., \"Style augmentation: data augmentation via style randomization.\" in CVPR workshops, vol. 6, 2019, pp. 10-11." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 166, + 563, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 166, + 563, + 184 + ], + "spans": [ + { + "bbox": [ + 310, + 166, + 563, + 184 + ], + "type": "text", + "content": "[17] Z. Zhong et al., “Random erasing data augmentation,” in AAAI, vol. 34, no. 07, 2020, pp. 13001-13008." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 184, + 563, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 184, + 563, + 201 + ], + "spans": [ + { + "bbox": [ + 310, + 184, + 563, + 201 + ], + "type": "text", + "content": "[18] X. Xu et al., \"Style transfer: From stitching to neural networks,\" in ICBASE. IEEE, 2024, pp. 526-530." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 201, + 563, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 201, + 563, + 220 + ], + "spans": [ + { + "bbox": [ + 310, + 201, + 563, + 220 + ], + "type": "text", + "content": "[19] Z. Ding, P. Li, Q. Yang, S. Li, and Q. Gong, “Regional style and color transfer,” in CVIDL. IEEE, 2024, pp. 593-597." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 220, + 563, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 220, + 563, + 237 + ], + "spans": [ + { + "bbox": [ + 310, + 220, + 563, + 237 + ], + "type": "text", + "content": "[20] K. He et al., “Deep residual learning for image recognition,” in CVPR, 2016, pp. 770–778." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 237, + 563, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 237, + 563, + 255 + ], + "spans": [ + { + "bbox": [ + 310, + 237, + 563, + 255 + ], + "type": "text", + "content": "[21] Q. Xu et al., \"A stochastic gda method with backtracking for solving nonconvex (strongly) concave minimax problems,\" arXiv, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 255, + 563, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 255, + 563, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 255, + 563, + 274 + ], + "type": "text", + "content": "[22] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” arXiv preprint arXiv:1412.6980, 2014." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 274, + 563, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 563, + 293 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 563, + 293 + ], + "type": "text", + "content": "[23] Z. Ding, P. Li, Q. Yang, and S. Li, \"Enhance image-to-image generation with llava-generated prompts,\" in ISPDS. IEEE, 2024, pp. 77-81." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file