diff --git a/.gitattributes b/.gitattributes index d06acdc32d7b807303bd25f6200e3be5d726ad4d..1ad5852bd221001629b2578b761a4945eafdb1d6 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1979,3 +1979,11 @@ data/2025/2503_13xxx/2503.13646/ec61822f-80f5-4b4c-80e5-df7fb23ac8f4_origin.pdf data/2025/2503_13xxx/2503.13657/f9f0f12f-c46c-4f07-9710-e29462265246_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_13xxx/2503.13721/a97f1dd6-65e6-4086-81b6-b457f406ad62_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_16xxx/2503.16527/a8560f1b-d038-456e-8a05-1cf3f71e9afc_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_content_list.json b/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..42c9b4af0119072c2c26d220d75ebd0d13af66de --- /dev/null +++ b/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_content_list.json @@ -0,0 +1,3732 @@ +[ + { + "type": "text", + "text": "Audio-Reasoner: Improving Reasoning Capability in Large Audio Language Models", + "text_level": 1, + "bbox": [ + 178, + 121, + 816, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhifei Xie $^{1\\dagger}$ Mingbao Lin $^{3\\dagger}$ Zihang Liu $^{2\\dagger}$", + "bbox": [ + 336, + 224, + 658, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Pengcheng Wu1 Shuicheng Yan2† Chunyan Miao1‡", + "bbox": [ + 303, + 239, + 691, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Nanyang Technological University $^{2}$ National University of Singapore $^{3}$ Rakuten", + "bbox": [ + 233, + 255, + 763, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "†Equal Contributions ‡Corresponding Authors", + "bbox": [ + 348, + 268, + 648, + 285 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhifei001@e.ntu.edu.sg linmb001@outlook.com liuzihang99@gmail.com", + "bbox": [ + 246, + 285, + 750, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "pengchengwu@ntu.edu.sg yansc@nus.edu.sg ascymiao@ntu.edu.sg", + "bbox": [ + 269, + 300, + 725, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project: https://github.com/xzf-thu/Audio-Reasoner", + "bbox": [ + 294, + 313, + 702, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 361, + 537, + 376 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in multimodal reasoning have largely 
overlooked the audio modality. We introduce Audio-Reasoner, a large-scale audio language model for deep reasoning in audio tasks. We curate a diverse collection of multi-task audio datasets with simple annotations, refining them through structured secondary labeling and complex question generation. Additionally, we utilize advanced closed-source models to generate structured reasoning chains, transforming raw annotations into a formatted inference process. Following inference scaling principles, we train Audio-Reasoner on CoTA, a high-quality reasoning dataset with 1.2 million reasoning-rich samples. Experiments show state-of-the-art performance across key benchmarks, including MMAU-mini $(+25.42\\%)$ , AIR-Bench chat/foundation $(+14.57\\% / + 10.13\\%)$ , and MELD $(+8.01\\%)$ . Our findings underscore the central role of structured CoT training in advancing audio reasoning.", + "bbox": [ + 228, + 388, + 767, + 556 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6b8d76aec6a568542a810d60105df57d132003453be82af5476ad0dd56265004.jpg", + "image_caption": [ + "Figure 1: Benchmark performance of Audio-Reasoner on tasks of speech emotion recognition (SER), speech-to-text translation (S2TT), MMAU, and AIR-Bench chat. S2TT is measured using BLEU while SER and MMAU are measured by ACC (left axis). AIR-Bench is scored by GPT (right axis)." + ], + "image_footnote": [], + "bbox": [ + 189, + 580, + 810, + 834 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.02318v2 [cs.SD] 20 Sep 2025", + "bbox": [ + 22, + 277, + 60, + 717 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Technical report.", + "bbox": [ + 171, + 922, + 274, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 173, + 89, + 312, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in large language models (LLMs) have significantly enhanced their reasoning capabilities, driven by innovations like chain-of-thought (CoT) and inference scaling. Models such as OpenAI's o1 [Jaech et al., 2024] and Deepseek-R1 [Guo et al., 2025] have demonstrated significant improvements, especially in tasks like mathematics and coding [Team et al., 2025, Zhao et al., 2024a, Muennighoff et al., 2025, Liu et al., 2024a, Zhang et al., 2024b, Deng et al., 2024, Hui et al., 2024, Yang et al., 2024a]. These models have set a precedent for achieving \"deep thinking\" by tackling complex, structured tasks. The CoT framework, in particular, has been successfully applied to multimodal models, improving their cognitive abilities in image and video reasoning. Models such as Visual-CoT [Shao et al., 2024a], LLaVA-Reasoner [Zhang et al., 2024a], and MAmmoTH-VL [Guo et al., 2024] have demonstrated that large-scale datasets and multidimensional reasoning can enhance model performance, even in tasks involving images and videos [Zou et al., 2023]. Other works, like Mulberry [Wen et al., 2019] and Image-of-Thought [Zhou et al., 2024], integrate reflective and image-editing tools to further refine multimodal reasoning, offering new pathways for understanding complex queries across multiple modalities.", + "bbox": [ + 169, + 113, + 826, + 308 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, the application of CoT in the audio modality has been largely underexplored. 
Although models like Audio Flamingo [Kong et al., 2024], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024] have pushed the boundaries of large audio language models (LALMs), these advancements have not yet incorporated CoT reasoning at scale. Recent research [Ma et al., 2025] suggests that existing CoT methods, such as zero-shot reasoning in audio tasks, fail to significantly improve performance on more complex queries. This limitation is largely attributed to the simplicity of existing audio datasets—such as AudioSet [Gemmeke et al., 2017], AudioCaps [Kim et al., 2019], and Clotho [Drossos et al., 2020]—which predominantly feature short, simple labels. These simplified datasets hinder the development of LALMs capable of more intricate reasoning. Without richer, more complex data, these models struggle with long-form reasoning, and the application of CoT often leads to severe hallucinations and degraded performance. Therefore, advancing CoT in LALMs necessitates overcoming these dataset limitations to allow for more effective, deep reasoning.", + "bbox": [ + 169, + 314, + 826, + 482 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the challenges in audio-based reasoning, we propose a scalable and effective approach to generating high-quality pretraining data. Using state-of-the-art commercial models, we introduce CoTA, a large-scale dataset containing 1.2 million refined captions and question-answer (QA) pairs. CoTA spans multiple datasets and tasks, undergoing rigorous filtering to ensure diversity and quality. Building on CoTA, we develop Audio-Reasoner, a large audio language model designed for long-context reasoning. Audio-Reasoner is trained with a 4K token context window and generates structured CoT reasoning whose length can exceed 1K tokens in real-world tasks. The model is fine-tuned on CoTA using supervised fine-tuning, adhering to a structured reasoning framework, as illustrated in Figure 2: (1) Planning—Identifies key problem components from the user query and formulates a structured sequence of reasoning steps essential for deriving an answer. (2) Caption—Extracts and integrates relevant multimodal content from the input to enrich the reasoning process. (3) Reasoning—Executes a systematic, step-by-step reasoning procedure to ensure logical coherence and accuracy. (4) Summary—Synthesizes the reasoning process into a final response that is concise, contextually grounded, and precise.", + "bbox": [ + 169, + 488, + 826, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our experimental results, partially presented in Figure 1, demonstrate the effectiveness of Audio-Reasoner. More comprehensively, we evaluate the model across multiple benchmarks: MMAU-mini [Sakshi et al., 2024]: A dataset with 1,500 closed-choice questions testing reasoning across sound, speech, and music. AIR-Bench [Yang et al., 2024b]: Various types of audio signals including human speech, natural sounds, and music. CoVoST2 (zh-en) [Wang et al., 2021]: Speech-to-text translation in Chinese and English. MELD [Poria et al., 2019]: Emotion classification. Compared to Qwen2-Audio-Instruct [Chu et al., 2024], Audio-Reasoner achieves: $+25.4\\%$ improvement on MMAU-mini with reasoning subtask gains of $+44.4\\%$ , $+26.1\\%$ , and $+9.3\\%$ ; $+14.6\\%$ gains on AIR-Bench chat; $+30.6\\%$ on CoVoST2 (ZN/EN subset, average BLEU score); $+8.01\\%$ on MELD. 
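(For clarity, these percentages are relative gains over the baseline's scores; e.g., the MMAU-mini figure follows from Table 4 as $(61.71 - 49.20) / 49.20 \\approx +25.4\\%$.) 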
These results validate the effectiveness of our approach in advancing long-context reasoning and inference scaling for audio models.", + "bbox": [ + 169, + 688, + 826, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The major contributions we have made in this paper include:", + "bbox": [ + 171, + 848, + 571, + 863 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We propose Audio-Reasoner, designed for deep reasoning and inference scaling in the audio modality. Built upon Qwen2-Audio and fine-tuned with structured CoT training, Audio-Reasoner significantly improves long-context reasoning across diverse audio tasks.", + "bbox": [ + 171, + 869, + 823, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We develop CoTA, a large-scale dataset with 1.2 million high-quality captions and QA pairs, spanning multiple audio domains. The dataset enables structured reasoning and enhances audio-language pretraining.", + "- We introduce a scalable data generation pipeline leveraging advanced commercial models to produce complex reasoning-based QA pairs and structured CoT annotations, enriching model training.", + "- We achieve state-of-the-art performance, with $+25.4\\%$ gains over Qwen2-Audio-Instruct on MMAU-mini, along with significant improvements in reasoning, translation, and emotion recognition tasks." + ], + "bbox": [ + 169, + 90, + 826, + 223 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 239, + 321, + 255 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Chain-of-Thought Reasoning. LLMs leverage in-context learning (ICL) to enhance their reasoning capabilities by processing prompts and context. This is further strengthened through CoT techniques. Various CoT methods have been explored, including Tree of Thoughts (TOT) [Yao et al., 2023], manual few-shot CoT [Wei et al., 2022], and automatically generated approaches [Jin et al., 2024, Zhang et al., 2022]. In addition, studies have delved into the necessity of CoT, its theoretical foundations, and its effectiveness across a wide range of tasks [Sprague et al., 2024, Stechly et al., 2024, Turpin et al., 2023]. The release of OpenAI's o1 model [Jaech et al., 2024] has sparked renewed interest in CoT research, significantly boosting the capabilities of LLMs, especially in multi-step reasoning tasks such as coding [Zhang et al., 2024b] and mathematics [Yang et al., 2024a], setting new performance benchmarks. CoT techniques have been integrated with other methods such as Monte Carlo Tree Search [Browne et al., 2012], reflection [Guo et al., 2025], and tool use [Qin et al., 2023], and are often trained alongside reinforcement learning approaches [Rafailov et al., 2023, Zeng et al., 2024, Engstrom et al., 2020, Shao et al., 2024b].", + "bbox": [ + 169, + 267, + 826, + 446 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multimodal Chain-of-Thought. CoT techniques have also been explored in the realm of multimodal large models. For example, Visual-COT [Shao et al., 2024a] incorporates object detection to aid in reasoning, LLaVA-Reasoner [Zhang et al., 2024a] uses closed-source models for CoT fine-tuning through recaptioning. LLaVA-CoT [Xu et al., 2024] and MAmmoTH-VL [Guo et al., 2024] scale datasets to improve model performance. 
Other models, such as Mulberry [Wen et al., 2019], explore the application of reflective thinking, and Image-of-Thought [Zhou et al., 2024] integrates image editing tools to enhance reasoning. Video-related studies [Wang et al., 2024, Han et al., 2024, Fei et al., 2024, Tang et al., 2024] have demonstrated the effectiveness of CoT in reasoning tasks within the video domain. However, the application of CoT in the audio domain is still in its infancy. The study Audio-CoT [Ma et al., 2025] shows some improvement with zero-shot CoT in audio tasks, but it falls short on more complex problems. This paper aims to explore this gap further.", + "bbox": [ + 169, + 455, + 826, + 609 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Large Audio Language Models. LALMs can be broadly categorized into two areas: audio understanding and real-time dialogue. Audio understanding models typically consist of a three-layer architecture—an encoder, connector, and an LLM—focusing on specific domains, as seen in models like Mu-LLaMA [Liu et al., 2024b], LTU [Gong et al., 2023b], EmoBox [Ma et al., 2024], and GAMA [Ghosh et al., 2024]. Other models, such as LTU-AS [Gong et al., 2023a], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024], employ unified architectures designed for multi-task training. Real-time conversation models, which focus on speech input and extend transformers to real-time speech synthesis, are also gaining popularity [Zhang et al., 2023b, Xie and Wu, 2024a,b, Fu et al., 2025, Defossez et al., 2024]. However, despite their focus on understanding and rapid response, current LALMs still lack significant exploration into reasoning tasks, such as CoT. This paper addresses this gap by investigating the application of CoT in LALMs.", + "bbox": [ + 169, + 619, + 826, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Audio-Reasoner", + "text_level": 1, + "bbox": [ + 171, + 787, + 341, + 803 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present the training methodology for our Audio-Reasoner model, designed to equip LALMs with structured, step-by-step reasoning in multimodal tasks. Unlike standard models, Audio-Reasoner generates both a chain-of-thought (CoT) and a final response, ensuring logical consistency and interpretability. We describe the training framework, which includes a structured CoT approach, a novel data generation pipeline, and the large-scale CoTA dataset. This dataset is carefully constructed through automated annotation, reasoning chain creation, and validation, setting the foundation for robust and explainable audio-based reasoning.", + "bbox": [ + 169, + 814, + 823, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/43b5b9c5fc69a44f947aab314c7528399913cc4f40054888654c56601c09c66b.jpg", + "image_caption": [ + "Figure 2: Comparison between Audio-Reasoner and Qwen2-Audio-Instruct: While Qwen2-Audio-Instruct produces brief and error-prone reasoning, our Audio-Reasoner uses a structured reasoning framework with distinct \"thinking\" and \"response\" phases, ensuring a more reliable and well-supported output through careful planning, information extraction, and step-by-step reasoning."
+ ], + "image_footnote": [], + "bbox": [ + 173, + 89, + 823, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Model Training with Audio Reasoning", + "text_level": 1, + "bbox": [ + 171, + 465, + 478, + 479 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A standard large language model is trained to generate an output sequence $Y$ given an input sequence $X$ . The probability distribution of the model's output is formulated as:", + "bbox": [ + 171, + 484, + 823, + 513 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP (Y \\mid X; \\theta) = f _ {\\theta} (X), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 522, + 825, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $f_{\\theta}$ is a Transformer-based model parameterized by $\\theta$ . The training objective follows a maximum likelihood estimation framework:", + "bbox": [ + 169, + 547, + 823, + 575 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\theta) = - \\sum_ {i = 1} ^ {N} \\log P \\left(Y _ {i} \\mid X _ {i}; \\theta\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 583, + 825, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In our Audio-Reasoner, the input consists of an audio signal $A$ and a text-based query $Q$ , forming the multimodal input representation:", + "bbox": [ + 169, + 642, + 823, + 670 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nX = (A, Q). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 452, + 672, + 825, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike conventional LLMs, where the output is a single response, we structure the model's output into two distinct components: the chain of thought reasoning $C$ , which captures the step-by-step logical process, and the final response $R$ , which provides the ultimate answer. The model thus learns to generate the concatenation of $C$ and $R$ , leading to the probability distribution:", + "bbox": [ + 169, + 703, + 823, + 761 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP (C, R \\mid A, Q; \\theta) = f _ {\\theta} (A, Q). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 397, + 768, + 825, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To ensure explicit learning of both reasoning and final response generation, we construct a dataset defined as:", + "bbox": [ + 169, + 803, + 823, + 830 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {D} = \\left\\{\\left(A _ {i}, Q _ {i}, C _ {i}, R _ {i}\\right) \\right\\} _ {i = 1} ^ {N}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 832, + 825, + 849 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where each training sample consists of an input audio signal $A_{i}$ , its corresponding textual query $Q_{i}$ , the structured reasoning process $C_{i}$ , and the final answer $R_{i}$ . 
This dataset formulation reinforces the model's ability to perform in-context learning and deep reasoning, ensuring that generated responses are not only accurate but also logically structured.", + "bbox": [ + 169, + 854, + 825, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The training objective maximizes the likelihood of both $C$ and $R$ , encouraging the model to first reason and then generate a response. The loss function is given by:", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} (\\theta) = - \\sum_ {i = 1} ^ {N} \\log P \\left(C _ {i}, R _ {i} \\mid A _ {i}, Q _ {i}; \\theta\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 127, + 825, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By optimizing this objective, Audio-Reasoner is trained to articulate a structured reasoning process before providing its final response. This approach enhances interpretability, reliability, and alignment with human reasoning.", + "bbox": [ + 169, + 180, + 823, + 223 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "At inference-time, our Audio-Reasoner follows a structured reasoning pipeline, as illustrated in Figure 2. The reasoning process consists of four sequential steps: (1) Planning $(P)$ : The model analyzes the query, identifies key problem components, and outlines the reasoning steps necessary to derive an answer. (2) Captioning $(C)$ : Relevant multimodal content is extracted from the input, such as speech transcription, acoustic event detection, or context information. (3) Reasoning $(R)$ : Based on the extracted content, the model performs structured, step-by-step reasoning. (4) Summary $(S)$ : The model synthesizes its reasoning process into a final, concise, and accurate response. This structured inference process can be formalized as follows:", + "bbox": [ + 169, + 228, + 826, + 340 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nP \\sim f _ {\\theta} (A, Q), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 345, + 825, + 362 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC \\sim f _ {\\theta} (A, Q, P), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 364, + 825, + 378 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nR \\sim f _ {\\theta} (A, Q, P, C), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 418, + 381, + 825, + 397 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS \\sim f _ {\\theta} (A, Q, P, C, R). \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 398, + 825, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compared to the direct-response counterpart [Chu et al., 2024], this approach provides two key advantages: Improved Interpretability—By explicitly modeling each reasoning step, the process becomes more transparent, making it easier to analyze and diagnose errors. Reduced Hallucinations—The structured reasoning pipeline mitigates speculative or incorrect responses, ensuring that outputs remain logically grounded.", + "bbox": [ + 169, + 428, + 828, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 2 illustrates the structured CoT reasoning process, highlighting how each stage contributes to the final response. 
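In effect, Eqs. (7)-(10) describe a single left-to-right decoding pass: by the chain rule, each stage is conditioned on all previously generated stages, so the four phases are contiguous segments of one output sequence rather than four separate invocations of $f_{\\theta}$. 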
This approach draws inspiration from recent advancements in symbolic reasoning and CoT training [Cui et al., 2024], which emphasize that zero-shot reasoning without training is less effective. Moreover, previous studies have shown that models tuned on native CoT data significantly outperform those trained on generic labels, especially in multimodal reasoning tasks [Guo et al., 2024, Wen et al., 2019].", + "bbox": [ + 169, + 503, + 826, + 585 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/88d6d1d91a5fcae4c317dc397974c8a5e17fb49b38e93a72633130f64611aca7.jpg", + "table_caption": [ + "Table 1: Domains and tasks of our constructed CoTA dataset." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Domain</td><td>Task</td><td>Description</td></tr>
<tr><td>Sound</td><td>Sound QA</td><td>Sound question answering</td></tr>
<tr><td rowspan="3">Speech</td><td>Speech QA</td><td>Speech question answering</td></tr>
<tr><td>SER</td><td>Speaker emotion recognition</td></tr>
<tr><td>S2TT</td><td>Speech to text translation</td></tr>
<tr><td>Music</td><td>Music QA</td><td>Music question answering</td></tr>
</table>
", + "bbox": [ + 305, + 616, + 687, + 731 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Systematic Data Preparation for Audio Reasoning", + "text_level": 1, + "bbox": [ + 171, + 747, + 563, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training the Audio-Reasoner model requires a high-quality, diverse, and multitask audio-based reasoning dataset. Our goal is to develop a scalable and effective data generation method that systematically transforms raw audio data and simple human-labeled annotations into structured reasoning tasks. The resulting CoTA dataset with 1.2 million samples, focusing on complex reasoning-based question-answering tasks, spans three domains—audio, speech, and music—as detailed in Table 1.", + "bbox": [ + 169, + 766, + 826, + 849 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To achieve this, we design a structured data generation pipeline consisting of three key stages: (1) generating high-quality annotations and diverse questions, (2) constructing structured reasoning chains, and (3) performing comprehensive validation. The complete pipeline is illustrated in Figure 3. The following sections describe each stage in detail.", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/cd2008c2e248f8239147184f97d37d4637b0f8e288fd39a3f6af8d50411851e3.jpg", + "image_caption": [ + "Figure 3: Multistage data generation pipeline." + ], + "image_footnote": [], + "bbox": [ + 176, + 89, + 823, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.1 Multistage Data Generation Pipeline", + "text_level": 1, + "bbox": [ + 171, + 402, + 480, + 417 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Stage 1: Automated Annotation and Question-Answer Synthesis. We begin by using advanced external closed-source models to improve the existing simple human annotations into high-quality and logically coherent training data. While large language models can produce hallucinations in free-form generation, they are highly effective at structured, evidence-based content creation. We leverage this strength by instructing the model to sequentially describe the audio elements, enhancing its understanding of sound sources and speech context. Based on these enriched descriptions, the model generates a diverse set of questions, ranging from straightforward factual queries to complex logical reasoning problems. This process ensures that the dataset captures a broad spectrum of reasoning tasks, as summarized in Table 1. Besides, in Sec. A.1 we provide the prompt in deducing closed-source models to generate questions and corresponding answers.", + "bbox": [ + 169, + 419, + 826, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Stage 2: Structured Reasoning Chain Construction. Next, we transform the generated question-answer pairs into structured reasoning chains. Given the limited development of CoT methodologies in the audio domain, we adopt a systematic approach to ensure inference stability. The model first plans and analyzes the questions, extracts key information from the captions, and formulates logical steps leading to the answer. To facilitate structured reasoning, we employ explicit step demarcations such as and , allowing the model to autonomously construct multi-step inference pathways. Sec. 
A.2 describes the prompt used for the structured reasoning chain construction process.", + "bbox": [ + 169, + 565, + 826, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Stage 3: Quality Assurance and Dataset Validation. Finally, we subject the generated data to a rigorous review process. Using the raw audio input, Stage 1 annotations, and Stage 2 reasoning chains, the model assesses whether the generated content is accurate, coherent, and suitable for inclusion in the final dataset. This step ensures the overall quality and reliability of the CoTA dataset. Sec. A.3 illustrates prompt used for filtering low-quality contents.", + "bbox": [ + 169, + 683, + 828, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.2 Task Taxonomy: Categories and Reasoning Methodologies", + "text_level": 1, + "bbox": [ + 171, + 761, + 633, + 776 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The CoTA dataset encompasses a range of reasoning-based tasks, each requiring distinct reasoning path that the model should grasp. These include:", + "bbox": [ + 169, + 777, + 823, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(1) Sound-Based Question Answering: The model identifies and analyzes sound characteristics, contextualizing them within the user's query to derive a reasoned response. (2) Speech-Based Question Answering: The model recognizes speaker timbres, transcribes speech content, and incrementally processes the question to determine the appropriate answer. (3) Speech Emotion Recognition (SER) and Speech-to-Text Translation (S2TT): These specialized tasks require the model to integrate speech recognition with emotion analysis and language translation, forming a structured reasoning process. (4) Music-Based Question Answering: As music is highly abstract, the", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/3ec1e7650c3c779d4d0d24f9cb1ea3e3f0794e2a588ef85c7281f11c5d6a2d1f.jpg", + "table_caption": [ + "Table 2: Composition of our CoTA Dataset. We consider Google Gemini [Team et al., 2024] to build the reasoning ability in CoTA. Note that Multi-Speaker and Complex Audio datasets are manually synthesized, details of which can be referred to Sec. B." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Category</td><td>Dataset Source</td><td>Main Skills Learning</td><td>Model Used</td><td>Quantity</td><td>Percentage</td><td>Synthetic</td></tr>
<tr><td rowspan="4">Speech</td><td>Multi-Speaker</td><td>Multi-speaker Speech QA</td><td>gemini-2.0-flash</td><td>117.4k</td><td>12.09%</td><td>Yes</td></tr>
<tr><td>MELD [Poria et al., 2019]</td><td>Speech Emotion QA</td><td>gemini-2.0-pro-exp</td><td>29.2k</td><td>3.01%</td><td>No</td></tr>
<tr><td>CoVoST2 [Wang et al., 2021]</td><td>Speech-to-Text Translation</td><td>gemini-2.0-flash</td><td>224.6k</td><td>23.13%</td><td>No</td></tr>
<tr><td>Mustard [Castro et al., 2019]</td><td>Sarcasm Detection</td><td>gemini-2.0-pro-exp</td><td>1k</td><td>0.10%</td><td>No</td></tr>
<tr><td>Music</td><td>MusicBench [Melechovsky et al., 2024]</td><td>Music QA</td><td>gemini-2.0-flash</td><td>137.1k</td><td>14.12%</td><td>No</td></tr>
<tr><td rowspan="4">Sound</td><td>AudioSet [Gemmeke et al., 2017]</td><td>Sound QA</td><td>gemini-2.0-flash</td><td>315.2k</td><td>32.46%</td><td>No</td></tr>
<tr><td>Clotho [Drossos et al., 2020]</td><td>Sound QA</td><td>gemini-2.0-pro-exp</td><td>9.3k</td><td>0.93%</td><td>No</td></tr>
<tr><td>AudioCaps [Kim et al., 2019]</td><td>Sound QA</td><td>gemini-2.0-flash</td><td>117.5k</td><td>12.10%</td><td>No</td></tr>
<tr><td>Complex Audio</td><td>Complex Audio QA</td><td>gemini-2.0-flash</td><td>20k</td><td>2.06%</td><td>Yes</td></tr>
</table>
", + "bbox": [ + 174, + 137, + 823, + 316 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "model first analyzes fundamental attributes such as tonality, tempo, and emotion before progressing to genre classification and deeper inferential reasoning based on the user's query. A comprehensive breakdown of the dataset's task categories and reasoning pathways is provided in Table 1.", + "bbox": [ + 169, + 342, + 823, + 387 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2.3 Conclusion and Next Steps", + "text_level": 1, + "bbox": [ + 171, + 393, + 413, + 409 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In summary, we have introduced a systematic data generation pipeline that ensures the creation of high-quality, structured reasoning data for the Audio-Reasoner model. Our approach involves enriching raw audio data with detailed annotations, generating diverse questions, constructing explicit reasoning chains, and implementing a comprehensive validation framework. The following section provides an in-depth analysis of the final CoTA dataset and its reasoning capabilities, with a detailed statistical overview presented in Table 2.", + "bbox": [ + 169, + 412, + 826, + 494 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 CoTA Dataset Analysis", + "text_level": 1, + "bbox": [ + 171, + 507, + 375, + 522 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To evaluate the quality and reasoning efficacy of the CoTA dataset, we analyze its design from two key perspectives: (1) comprehensive audio coverage, ensuring broad representation across real-world and synthetic scenarios, and (2) scalability of reasoning complexity, which aligns task difficulty with structured inference patterns. Together, these aspects address critical gaps in audio-language pretraining.", + "bbox": [ + 169, + 526, + 823, + 597 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comprehensive Audio Coverage. CoTA integrates three audio domains—speech (38.33%), music (14.12%), and environmental sounds (47.55%)—ensuring diverse and representative coverage of real-world auditory contexts. This multi-domain structure captures a wide spectrum of acoustic phenomena, ranging from conversational speech (e.g., speech-to-text translation tasks in CoVoST 2) to intricate musical structures (MusicBench) and fine-grained environmental sound analysis (e.g., AudioSet's rich descriptions of acoustic environments).", + "bbox": [ + 169, + 603, + 826, + 688 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A distinctive feature of CoTA is its hybrid synthetic-real composition, where synthetic data (Multi-Speaker and Complex Audio, $14.15\\%$ of total samples) is strategically incorporated to enhance complex reasoning tasks, such as multi-step logical inference in Complex Audio. Meanwhile, the majority of the dataset is derived from high-quality real-world sources (e.g., MELD for emotion recognition). By unifying tasks across 10 distinct categories, spanning from fundamental classification to advanced tasks like translation and irony detection, CoTA facilitates a hierarchical learning process—an aspect largely absent in traditional datasets constrained to simple labeling tasks.", + "bbox": [ + 169, + 694, + 826, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Scalability of Reasoning Complexity. The word count distribution in the CoTA dataset highlights the model's capacity for long-chain reasoning. 
As shown in Figure 4, most responses fall between 300 and 500 words, allowing for nuanced reasoning and detailed logical steps, particularly in audio and music question answering. This extended length supports transparency in reasoning, ensuring a thorough exploration of complex ideas. For more demanding tasks, such as those in the Multi-Speaker dataset, responses can reach up to 1,500 words. This increase reflects the model's ability to systematically break down intricate problems, demonstrating adaptive reasoning in scenarios requiring a deep understanding of multiple interacting elements.", + "bbox": [ + 169, + 800, + 828, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c632a41b5a7f412a8c682f3bc6d71109768d4b1c43941df87876b341f545f899.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 90, + 392, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0bd2aa1ff7c3048ba77b507121a9d0abf1df8f3b78910baab538684bec5b6143.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 90, + 599, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0b63d5816c7d0317a054224a41f1c67e98fb31529cf58d18f1df86ff86ef0e74.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 602, + 90, + 813, + 170 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fc8096f2ec175706b289de4b055035b882781920208ae3008150038fb4a696d5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 170, + 392, + 246 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2de0a197ee7589cf5a822ea1b183f833691d28cda1c03b177d528955bc25806c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 170, + 601, + 246 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dd3a7cdcf4bf1022e1ef3b362fc3dc39661fa746dfbd2244684f3c5d5d0bd44f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 602, + 170, + 813, + 246 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d6eae35fa287899c4c41075663ba130ae8f203ee0f1b708b9020787659e4450b.jpg", + "image_caption": [ + "Figure 4: The bar chart shows the data length distribution across nine CoTA sub-datasets, with intervals of 100 on the horizontal axis (0—1200) and proportions on the vertical axis (totaling $100\\%$ ). A moving average trend line is overlaid." + ], + "image_footnote": [], + "bbox": [ + 181, + 246, + 392, + 325 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9116a5ec7b70fbbbb30739b8027d02e6fc477dfb33fb817358a70343cda2369e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 246, + 601, + 325 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/44be2c35e1b0d98bd8e74345eeddadcfbb53fdc71670efecae6cebf702ece18c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 602, + 246, + 813, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conversely, simpler tasks like S2TT elicit more concise responses, typically between 100 and 200 words. This brevity prevents unnecessary elaboration, maintaining clarity and efficiency when a straightforward answer suffices. By adjusting response length based on task complexity, the model showcases its dynamic reasoning capabilities—balancing depth and precision to effectively address a wide range of analytical challenges. Sec. 
C introduces more evidence about CoTA's scalability of reasoning complexity.", + "bbox": [ + 169, + 406, + 823, + 491 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 505, + 313, + 522 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 171, + 529, + 352, + 544 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Training Details. Our model, Audio-Reasoner, is built on Qwen2-Audio-Instruct [Chu et al., 2024], comprising a total of 8.4 billion parameters. The training process was conducted using the ms-swift framework [Zhao et al., 2024b], employing supervised fine-tuning with full-parameter tuning. We used a maximum learning rate of 1e-5 and trained the model for a single epoch across the entire CoTA dataset.", + "bbox": [ + 169, + 547, + 823, + 617 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Evaluation Metric. Existing evaluation datasets in the audio domain primarily focus on simple classification tasks or BLEU [Papineni et al., 2002] score-based assessments of model responses. These approaches, however, fail to comprehensively reflect the reasoning capabilities of LALMs. To address this, our evaluation methodology incorporates multiple diverse benchmarks. First, we assessed the model's accuracy on closed-form questions using the MMAU-mini subset [Sakshi et al., 2024]. This subset was specifically chosen since the model's training data does not include multiple-choice questions. Additionally, we evaluated its real-world conversational abilities using the chat and foundation sections of AIR-Bench [Yang et al., 2024b]. These benchmarks encompass various audio modalities, including sound, speech, and music. Beyond these, we also tested the model's performance on traditional audio-related tasks such as speech-to-text translation (S2TT) on CoVoST 2 [Wang et al., 2021] and speech emotion recognition (SER) on the MELD dataset [Poria et al., 2019]. A comprehensive summary of the evaluation tasks and datasets is presented in Table 3.", + "bbox": [ + 169, + 626, + 826, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baselines. We primarily select state-of-the-art large audio language models as the baselines for comparison. These include the closed-source models Gemini-1.5-pro [Team et al., 2024], GPT-4o [Hurst et al., 2024], and Qwen-Audio-Turbo [Chu et al., 2023], as well as the open-source models SALMONN [Tang et al., 2023], Qwen-Audio-Chat [Chu et al., 2023], and Qwen2-Audio-Instruct [Chu et al., 2024], the last of which also serves as our base model. Additionally, we compared cascaded approaches such as Whisper [Radford et al., 2023] + GPT-4 [Achiam et al., 2023] and a series of mainstream multimodal large language models [Gong et al., 2023b,a, Kong et al., 2024, Ghosh et al., 2024, Liu et al., 2024b, Su et al., 2023, Wu et al., 2024, Wang et al., 2023, Zhang et al., 2023a].", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c3e3427d62e128e51a15ee3ca40b87c9a1ad286d1b71c5c6a39e8ff92cd8525c.jpg", + "table_caption": [ + "Table 3: Summary of evaluation benchmarks for Audio-Reasoner." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Dataset</td><td>Split</td><td>Task</td><td>Metric</td></tr>
<tr><td rowspan="3">MMAU-mini</td><td>Sound</td><td>Sound QA</td><td>ACC</td></tr>
<tr><td>Speech</td><td>Speech QA</td><td>ACC</td></tr>
<tr><td>Music</td><td>Music QA</td><td>ACC</td></tr>
<tr><td rowspan="9">AIR-Bench</td><td>Chat-Sound</td><td>Sound QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Chat-Speech</td><td>Speech QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Chat-Music</td><td>Music QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Chat-MixedAudio</td><td>Complex Audio QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Foundation-SoundAQA</td><td>Sound QA</td><td>ACC</td></tr>
<tr><td>Foundation-SER</td><td>Speech QA</td><td>ACC</td></tr>
<tr><td>Foundation-SIC</td><td>Speaker Intent Classification</td><td>ACC</td></tr>
<tr><td>Foundation-SNV</td><td>Speaker Number Verification</td><td>ACC</td></tr>
<tr><td>Foundation-MusicAQA</td><td>Music QA</td><td>ACC</td></tr>
<tr><td>CoVoST 2</td><td>Test</td><td>Speech-to-Text Translation (S2TT)</td><td>BLEU</td></tr>
<tr><td>MELD</td><td>Test</td><td>Speech Emotion Recognition (SER)</td><td>ACC</td></tr>
</table>
", + "bbox": [ + 176, + 109, + 823, + 411 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Main Results", + "text_level": 1, + "bbox": [ + 171, + 436, + 307, + 450 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of Audio-Reasoner, we compare its performance against both closed-source and open-source baselines on benchmark datasets of MMAU-mini and AIR-Bench chat/foundation, CoVoST 2 (zn/en subset) and MELD. The results in Tables 4, 5, 6, 7 and 8 clearly demonstrate that Audio-Reasoner significantly outperforms existing models, setting a new state-of-the-art in audio reasoning tasks.", + "bbox": [ + 169, + 455, + 826, + 527 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/4cdc42b2122ada9d534b56954388b22fa1a2e3cfc283d42019d98a9267293df7.jpg", + "table_caption": [ + "Table 4: Performance comparison on MMAU-mini. The $\\{\\mathbf{so},\\mathbf{mu},\\mathbf{sp}\\}$ indicates whether \"sound\", \"music\", and \"speech\" have been used in training." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Model</td><td>Size</td><td>{so, mu, sp}</td><td>Sound</td><td>Music</td><td>Speech</td><td>Avg</td></tr>
<tr><td colspan="7">Closed-Source</td></tr>
<tr><td>gpt4o + caption</td><td>-</td><td>- - -</td><td>63.36</td><td>60.77</td><td>53.15</td><td>57.30</td></tr>
<tr><td>gemini-1.5-pro</td><td>-</td><td>- - -</td><td>56.75</td><td>49.40</td><td>58.55</td><td>54.90</td></tr>
<tr><td colspan="7">Open-Source</td></tr>
<tr><td>LTU</td><td>7B</td><td>Y Y N</td><td>22.52</td><td>9.69</td><td>17.71</td><td>16.89</td></tr>
<tr><td>LTU-AS</td><td>7B</td><td>Y Y Y</td><td>23.35</td><td>9.10</td><td>20.60</td><td>17.68</td></tr>
<tr><td>Audio Flamingo - Chat</td><td>2.2B</td><td>Y Y N</td><td>23.42</td><td>15.26</td><td>11.41</td><td>16.69</td></tr>
<tr><td>GAMA</td><td>7B</td><td>Y Y N</td><td>41.44</td><td>32.33</td><td>18.91</td><td>30.90</td></tr>
<tr><td>GAMA-IT</td><td>7B</td><td>Y Y N</td><td>43.24</td><td>28.44</td><td>18.91</td><td>30.20</td></tr>
<tr><td>MU-LLaMA</td><td>7B</td><td>N Y N</td><td>40.84</td><td>32.63</td><td>22.22</td><td>31.90</td></tr>
<tr><td>SALMONN</td><td>13B</td><td>Y Y Y</td><td>41.00</td><td>34.80</td><td>25.50</td><td>33.70</td></tr>
<tr><td>Qwen-audio-Chat</td><td>8.4B</td><td>Y Y Y</td><td>55.25</td><td>44.00</td><td>30.03</td><td>43.10</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>8.4B</td><td>Y Y Y</td><td>54.95</td><td>50.98</td><td>42.04</td><td>49.20</td></tr>
<tr><td colspan="7">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>8.4B</td><td>Y Y Y</td><td>60.06</td><td>64.30</td><td>60.70</td><td>61.71</td></tr>
</table>
", + "bbox": [ + 184, + 575, + 810, + 825 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Performance on MMAU-mini. MMAU-mini in Table 4 assesses multimodal audio understanding across three major domains: sound, music, and speech. We first make a comparison with closed-source models. Audio-Reasoner achieves the highest overall score (61.71%) outperforming GPT-4o (57.30%) and Gemini-1.5-Pro (54.90%). Music reasoning shows the most significant improvement—Audio-Reasoner: 64.30%, GPT-4o 60.77% and Gemini-1.5-Pro: 49.40%. This indicates superior", + "bbox": [ + 169, + 840, + 828, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/495d72673fc5923d3356f92e45a94f3c487f2f71c1b2b4eb8e0ea4f432c28a19.jpg", + "table_caption": [ + "Table 5: Performance comparison on AIR-Bench chat benchmark." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Model</td><td>Sound</td><td>Music</td><td>Speech</td><td>Mixed Audio</td><td>Average</td></tr>
<tr><td colspan="6">Closed-Source</td></tr>
<tr><td>Whisper+GPT4</td><td>-</td><td>-</td><td>7.54</td><td>-</td><td>7.54</td></tr>
<tr><td>Qwen-Audio-Turbo</td><td>6.59</td><td>5.98</td><td>7.04</td><td>5.77</td><td>6.34</td></tr>
<tr><td>Gemini-1.5-pro</td><td>5.49</td><td>5.06</td><td>6.97</td><td>5.27</td><td>5.70</td></tr>
<tr><td colspan="6">Open-Source</td></tr>
<tr><td>Macaw-LLM</td><td>1.01</td><td>0.91</td><td>0.97</td><td>1.00</td><td>1.01</td></tr>
<tr><td>SpeechGPT</td><td>0.95</td><td>0.95</td><td>1.57</td><td>1.14</td><td>1.15</td></tr>
<tr><td>Next-gpt</td><td>4.76</td><td>4.18</td><td>3.86</td><td>2.92</td><td>4.13</td></tr>
<tr><td>Pandagpt</td><td>5.46</td><td>5.06</td><td>3.58</td><td>2.93</td><td>4.25</td></tr>
<tr><td>BLSP</td><td>5.55</td><td>5.08</td><td>6.17</td><td>4.52</td><td>5.33</td></tr>
<tr><td>Qwen-Audio</td><td>6.95</td><td>5.52</td><td>6.47</td><td>5.38</td><td>6.08</td></tr>
<tr><td>SALMONN</td><td>6.28</td><td>5.95</td><td>6.16</td><td>6.08</td><td>6.11</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>6.99</td><td>6.79</td><td>7.18</td><td>6.77</td><td>6.93</td></tr>
<tr><td colspan="6">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>7.68</td><td>8.05</td><td>8.19</td><td>6.65</td><td>7.94</td></tr>
</table>
", + "bbox": [ + 220, + 108, + 774, + 363 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "musical structure comprehension, enabled by CoTA's diverse music-based tasks. Speech-based reasoning is also notably strong—Audio-Reasoner: $60.70\\%$ , GPT-4o: $53.15\\%$ and Gemini-1.5-Pro: $58.55\\%$ . This validates CoTA's impact in training models for context-dependent spoken language understanding. In comparison with open-source models, Audio-Reasoner surpasses all open-source models, with the next-best, Qwen2-Audio-Instruct, trailing by 12.51 percentage points $(49.20\\%)$ . Across individual domains, Audio-Reasoner achieves $60.06\\%$ in sound reasoning (beating Qwen2-Audio's $54.95\\%$ ), $64.30\\%$ in music (outperforming Qwen2-Audio's $50.98\\%$ ), and $60.70\\%$ in speech (exceeding Qwen2-Audio's $42.04\\%$ ).", + "bbox": [ + 169, + 387, + 826, + 500 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Performance on AIR-Bench chat. (1) chat benchmark. AIR-Bench chat in Table 5 evaluates contextual and conversational reasoning across four audio types: sound, music, speech, and mixed audio. Regarding closed-source models, Audio-Reasoner achieves the highest overall score (7.94), outperforming Gemini-1.5-Pro (5.70) and Whisper+GPT-4 (7.54). It shows the most significant improvements in music (8.05) and speech (8.19). Additionally, its mixed audio reasoning score (6.65) demonstrates proficiency in handling multi-source audio tasks. As for comparison with open-source models, Audio-Reasoner sets a new benchmark, surpassing Qwen2-Audio (6.93) by 1.01 points. Across domains, it achieves 7.68 in sound (beating Qwen2-Audio's 6.99), 8.05 in music (exceeding Qwen2-Audio's 6.79), and 8.19 in speech (outperforming Qwen-2-Audio's 7.18), showcasing balanced expertise.", + "bbox": [ + 169, + 505, + 826, + 643 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/0b50cd8936feda0adcc0ab791100fca33a52a65b976782522081f048a2a42f41.jpg", + "table_caption": [ + "Table 6: Performance comparison on AIR-Bench foundation benchmark." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td rowspan="2">Model</td><td>AIR-Sound</td><td>AIR-Music</td><td colspan="3">AIR-Speech</td><td rowspan="2">Average</td></tr>
<tr><td>SoundAQA</td><td>MusicAQA</td><td>SER</td><td>SIC</td><td>SNV</td></tr>
<tr><td colspan="7">Closed-Source</td></tr>
<tr><td>whisper+GPT4</td><td>-</td><td>-</td><td>59.5</td><td>87.7</td><td>30.0</td><td>59.1</td></tr>
<tr><td>Qwen-Audio-Turbo</td><td>62.8</td><td>62.5</td><td>60.0</td><td>56.4</td><td>54.3</td><td>59.2</td></tr>
<tr><td colspan="7">Open-Source</td></tr>
<tr><td>NEXT-GPT</td><td>18.8</td><td>47.1</td><td>25.7</td><td>25.6</td><td>25.4</td><td>28.5</td></tr>
<tr><td>SpeechGPT</td><td>33.9</td><td>31.3</td><td>37.6</td><td>45.8</td><td>32.6</td><td>36.2</td></tr>
<tr><td>BLSP</td><td>36.1</td><td>31.0</td><td>27.4</td><td>46.6</td><td>28.1</td><td>33.8</td></tr>
<tr><td>PandaGPT</td><td>48.7</td><td>50.7</td><td>26.0</td><td>28.5</td><td>43.2</td><td>39.4</td></tr>
<tr><td>SALMONN</td><td>28.4</td><td>54.6</td><td>29.9</td><td>36.7</td><td>34.3</td><td>36.8</td></tr>
<tr><td>Qwen-Audio-Chat</td><td>64.6</td><td>48.2</td><td>43.2</td><td>77.8</td><td>35.3</td><td>53.8</td></tr>
<tr><td colspan="7">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>65.7</td><td>55.2</td><td>60.5</td><td>88.1</td><td>56.3</td><td>65.2</td></tr>
</table>
", + "bbox": [ + 181, + 676, + 810, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(2) foundation benchmark. AIR-Bench foundation in Table 6 evaluates fundamental audio understanding across three primary categories: sound, music, and speech, with speech further divided into three subdomains: Speech Emotion Recognition (SER), Speaker Identification and Classification (SIC), and Speech Number Variation (SNV). Audio-Reasoner achieves the highest overall score (65.2), outperforming both closed-source and open-source baselines. Compared to the strongest closed-source model, Qwen-Audio-Turbo (59.2), Audio-Reasoner leads by 6.0 points, demonstrating superior reasoning across all audio domains. With the sound category, Audio-Reasoner attains 65.7, surpassing Qwen-Audio-Chat (64.6) and Qwen-Audio-Turbo (62.8), highlighting its strong ability in environmental and non-speech audio understanding. For music reasoning, Audio-Reasoner achieves 55.2, significantly outperforming Qwen-Audio-Turbo (48.2) and all open-source baselines, indicating better comprehension of musical structures and attributes. Regarding speech reasoning, Audio-Reasoner sets new state-of-the-art results across all subdomains. It attains 60.5 in SER (v.s. Qwen-Audio-Turbo's 60.0), 88.1 in SIC (surpassing Whisper+GPT-4's 87.7), and 56.3 in SNV (exceeding Qwen-Audio-Turbo's 54.3). The substantial lead in SIC showcases its exceptional speaker recognition capability, benefiting from CoTA's step-by-step reasoning process.", + "bbox": [ + 169, + 90, + 826, + 299 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/05c185e32b1c6a4fde3834169001890bcc75c06534623c92190fb89b5e332d73.jpg", + "table_caption": [ + "Table 7: Performance comparison of the speech-to-text translation (S2TT) task on CoVoST 2 dataset. We consider the mutual conversion between Chinese and English as training and evaluation data." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td rowspan="2">Model</td><td colspan="5">EN-ZN</td><td colspan="5">ZN-EN</td><td rowspan="2">Avg</td></tr>
<tr><td>BLEU1</td><td>BLEU2</td><td>BLEU3</td><td>BLEU4</td><td>Avg</td><td>BLEU1</td><td>BLEU2</td><td>BLEU3</td><td>BLEU4</td><td>Avg</td></tr>
<tr><td colspan="12">Closed-Source</td></tr>
<tr><td>Gemini-1.5-pro</td><td>68.25</td><td>49.12</td><td>37.81</td><td>29.79</td><td>46.24</td><td>51.83</td><td>26.61</td><td>16.27</td><td>10.88</td><td>26.39</td><td>36.32</td></tr>
<tr><td colspan="12">Open-Source</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>58.63</td><td>39.55</td><td>28.71</td><td>21.40</td><td>37.07</td><td>48.52</td><td>24.31</td><td>14.65</td><td>9.24</td><td>24.18</td><td>30.63</td></tr>
<tr><td colspan="12">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>72.89</td><td>54.17</td><td>42.46</td><td>33.95</td><td>50.87</td><td>56.50</td><td>29.99</td><td>18.37</td><td>11.62</td><td>29.13</td><td>40.00</td></tr>
</table>
", + "bbox": [ + 174, + 345, + 820, + 472 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Performance on CoVoST 2. The CoVoST 2 dataset in Table 7 evaluates speech-to-text translation, a fundamental task in cross-lingual speech understanding. Audio-Reasoner demonstrates the strengths of Audio-Reasoner in speech-to-text translation across both English-to-Chinese (EN-ZN) and Chinese-to-English (ZN-EN) tasks.", + "bbox": [ + 169, + 484, + 826, + 542 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "For EN-ZN translation, Audio-Reasoner outperforms both closed-source Gemini-1.5-pro and open-source Qwen2-Audio-Instruct. With an average BLEU score of 50.87, it surpasses Gemini-1.5-pro's score of 46.24 by 4.63 points and Qwen2-Audio-Instruct's 37.07 by a significant 13.80 points. Audio-Reasoner's BLEU-4 score of 33.95 highlights its ability to generate fluent, high-quality translations, even for more complex sentence structures. In the ZN-EN translation task, Audio-Reasoner continues to show superiority with an average BLEU score of 29.13, outperforming Gemini-1.5-pro (with a score of 26.39) by 2.74 points and Qwen2-Audio-Instruct (with a score of 24.18) by 4.95 points. Its BLEU-4 score of 11.62 reflects an enhanced ability to produce coherent and accurate translations, particularly in more challenging, longer sentences.", + "bbox": [ + 169, + 547, + 826, + 672 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "These results demonstrate that Audio-Reasoner excels in both capturing cross-lingual semantic alignment, consistently outperforming existing models in speech-to-text translation tasks.", + "bbox": [ + 169, + 678, + 823, + 708 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/6ddeec26dc0b46ed82c9b7d248215cee84c766c54b928e445ab9205f13c4bba5.jpg", + "table_caption": [ + "Table 8: Performance comparison of the speech emotion recognition (SER) task on MELD dataset." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Model</td><td>Unweighted_ACC</td></tr>
<tr><td>EMO-box</td><td>31.5</td></tr>
<tr><td>SALMONN</td><td>39.2</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>49.9</td></tr>
<tr><td>Audio-Reasoner</td><td>53.9</td></tr>
</table>
", + "bbox": [ + 339, + 739, + 651, + 814 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Performance on MELD. The MELD dataset in Table 8 evaluates speech emotion recognition, a crucial task for understanding affective states in spoken language. Audio-Reasoner achieves the highest unweighted accuracy of 53.9, outperforming all baseline models, including the previous state-of-the-art Qwen2-Audio-Instruct (49.9) by 4.0 percentage points. The results further validate that Audio-Reasoner excels in emotion perception and contextual audio understanding, solidifying its effectiveness in real-world speech emotion recognition tasks.", + "bbox": [ + 169, + 827, + 823, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/9322ca54bc2fde2f6f2c9939c1e41fb674388081d73e82a1630c156cc78330a8.jpg", + "image_caption": [ + "Figure 5: Example of Audio-Reasoner answering music-based question." + ], + "image_footnote": [], + "bbox": [ + 189, + 102, + 215, + 126 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Analyze the interplay between the guitar riff and the spoken commentary. How do they contribute to the overall impression or potential purpose of the piece?", + "bbox": [ + 233, + 104, + 759, + 126 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "1", + "text_level": 1, + "bbox": [ + 236, + 128, + 331, + 143 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/2037f12e5111931657463be8d6a3644c6f01c10ec8a126d8d0536b94ae5f6035.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 779, + 148, + 807, + 180 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 202, + 156, + 245, + 165 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": " The question asks about interplay and contribution. Analyze the music clip, focusing on: Characterizing the guitar riff: style, rhythm, melodic features. Characterizing the spoken commentary: tone, content, frequency. Analyzing their relationship: explain, praise, interrupt. Inferring the overall impression and purpose: instruction, demonstration, etc.? ", + "bbox": [ + 210, + 166, + 759, + 189 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": " The audio has a blues - rock guitar riff on a clean - toned electric guitar, moderate tempo, 4/4 time. There's male spoken commentary, informal and relaxed, suggesting instruction or informal performance.", + "bbox": [ + 210, + 195, + 764, + 213 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": " Consider scenarios:", + "bbox": [ + 210, + 218, + 352, + 227 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "**Scenario 1: Instructional Context", + "bbox": [ + 210, + 227, + 344, + 234 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "If commentary explains riff aspects (e.g., \"bend on third string\", \"E minor blues progression\"), it's instructional. Guitar demonstrates, commentary clarifies. If it gives positive reinforcement (\"very cool riff, man\"), it's a casual lesson or performance with teaching.", + "bbox": [ + 210, + 234, + 753, + 250 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "**Scenario 2: Informal Performance/Demonstration", + "bbox": [ + 210, + 251, + 406, + 260 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "If commentary is minimal (reactions like \"yeah\", \"alright\"), it's an informal performance. 
Riff's simplicity and repetition suggest a demonstration for less-experienced musicians.", + "bbox": [ + 210, + 260, + 761, + 276 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The audio leans towards scenario 1. \"very cool riff, man\" implies positive assessment for teaching or demonstration. Clean tone and moderate pace suggest instruction/demonstration, not a raw blues performance.", + "bbox": [ + 210, + 277, + 766, + 295 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Refining: E minor blues with E-B chord progression and C# implies commentary helps demonstrate their use. ", + "bbox": [ + 210, + 295, + 692, + 303 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": " The blues-rock, moderate-tempo guitar riff is complemented by relaxed, informal spoken commentary. The interplay indicates a purpose of instruction or demonstration of the riff's construction. The positive comment supports an informal teaching or relaxed demonstration.", + "bbox": [ + 210, + 311, + 754, + 335 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 202, + 337, + 248, + 345 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "", + "bbox": [ + 202, + 347, + 269, + 357 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The interplay creates an impression of informal instruction or demonstration. The clean-toned guitar plays a moderate-tempo blues-rock riff, and the male voice gives relaxed commentary like \"very cool riff, man\". The riff's simplicity, moderate pace, and E minor blues elements suggest the purpose is to demonstrate the riff and how to play its features, not a formal performance or raw blues.", + "bbox": [ + 210, + 357, + 764, + 382 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "", + "bbox": [ + 202, + 383, + 272, + 392 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Why Audio-Reasoner Excels. While open-source models typically lag behind closed-source alternatives, Audio-Reasoner narrows this gap significantly. It outperforms Gemini-1.5-Pro in multiple categories and achieves competitive results against GPT-4o. A key factor driving Audio-Reasoner's state-of-the-art performance is the CoTA dataset, which provides an unparalleled foundation for comprehensive, structured, and context-aware audio reasoning. The dataset contributes to Audio-Reasoner's success in two critical ways: (1) CoTA's comprehensive audio reasoning data. Unlike models trained on fragmented or task-specific datasets, Audio-Reasoner benefits from CoTA's extensive and balanced coverage of sound, speech, and music-based reasoning. This ensures robust adaptation across diverse audio tasks, enhancing its ability to generalize across multimodal benchmarks. (2) CoTA's strong logical and contextual inference. CoTA's emphasis on structured reasoning and contextual awareness enables Audio-Reasoner to outperform existing models in long-form reasoning (MMAU-mini) and conversational audio understanding (AIR-Bench chat). 
By leveraging deep logical inference and nuanced context retention, it surpasses prior approaches in handling complex speech interactions and multimodal reasoning.", + "bbox": [ + 169, + 477, + 826, + 672 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "These strengths position Audio-Reasoner as a breakthrough in open-source audio intelligence, setting a new benchmark for multimodal reasoning in speech comprehension, translation, and real-world audio interpretation.", + "bbox": [ + 169, + 690, + 823, + 733 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Case Study", + "text_level": 1, + "bbox": [ + 171, + 756, + 290, + 771 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Here we show a case in Figure 5, demonstrating the audio-based reasoning capability of Audio-Reasoner. The system analyzes the interplay between a guitar riff and a spoken commentary, systematically breaking down their characteristics and relationship. It identifies key musical features, assesses the commentary's tone and intent, and infers the overall purpose of the piece. By considering different scenarios, Audio-Reasoner determines that the interplay suggests an informal instructional or demonstrative context rather than a formal performance. This example highlights the model's ability to extract meaningful insights from audio, combining musical analysis with contextual interpretation.", + "bbox": [ + 169, + 780, + 826, + 878 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For more remarkable examples, please refer to Sec. D.", + "bbox": [ + 171, + 897, + 529, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 89, + 302, + 106 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this work, we introduced Audio-Reasoner, a large audio language model (LALM) designed to advance deep reasoning in audio-based tasks. By leveraging inference scaling and structured chain-of-thought (CoT) reasoning, we demonstrated significant performance improvements across key benchmarks. Central to our approach is CoTA, a large-scale, high-quality dataset containing around 1.2 million structured reasoning samples, which we generated through a systematic pipeline of annotation refinement, question synthesis, and CoT generation.", + "bbox": [ + 169, + 113, + 826, + 198 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Our experimental results highlight the effectiveness of structured reasoning in the audio domain, achieving state-of-the-art performance on MMAU-mini $(+25.04\\%)$ , CoVoST 2 $(+8.31\\%)$ , and MELD $(+8.01\\%)$ . These findings underscore the critical role of reasoning-rich datasets and inference scaling in multimodal learning, particularly for audio-based tasks where existing models struggle with complex reasoning.", + "bbox": [ + 169, + 203, + 826, + 273 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Moving forward, we believe that structured CoT methodologies will play a crucial role in enhancing the reasoning capabilities of LALMs. Future research directions include extending Audio-Reasoner's capabilities to multi-turn dialogue reasoning, cross-modal knowledge integration, and real-world applications in speech-driven AI systems. 
By publicly releasing our model, dataset, and code, we aim to foster further innovation and collaboration in the field of audio reasoning, paving the way for more advanced and interpretable LALMs.", + "bbox": [ + 169, + 279, + 826, + 363 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 376, + 269, + 391 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "Cameron B Browne, Edward Powley, Daniel Whitehouse, Simon M Lucas, Peter I Cowling, Philipp Rohlfshagen, Stephen Tavener, Diego Perez, Spyridon Samothrakis, and Simon Colton. A survey of monte carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in Games (T-CIAIG), (1):1-43, 2012.", + "Santiago Castro, Devamanyu Hazarika, Verónica Pérez-Rosas, Roger Zimmermann, Rada Mihalcea, and Soujanya Poria. Towards multimodal sarcasm detection. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 4619-4629, 2019.", + "Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023.", + "Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, et al. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024.", + "Yingqian Cui, Pengfei He, Xianfeng Tang, Qi He, Chen Luo, Jiliang Tang, and Yue Xing. A theoretical understanding of chain-of-thought: Coherent reasoning and error-aware demonstration. arXiv preprint arXiv:2410.16540, 2024.", + "Alexandre Defossez, Laurent Mazare, Manu Orsini, Amelie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024.", + "Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024.", + "Konstantinos Drossos, Samuel Lipping, and Tuomas Virtanen. Clotho: An audio captioning dataset. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 736-740, 2020.", + "Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. arXiv preprint arXiv:2412.10117, 2024." + ], + "bbox": [ + 173, + 398, + 828, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Logan Engstrom, Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Firdaus Janoos, Larry Rudolph, and Aleksander Madry. Implementation matters in deep policy gradients: A case study onppo and trpo. arXiv preprint arXiv:2005.12729, 2020.", + "Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong Li Lee, and Wynne Hsu. Video-of-thought: step-by-step video reasoning from perception to cognition. 
In International Conference on Machine Learning (ICML), pages 13109-13125, 2024.", + "Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957, 2025.", + "Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017.", + "Sreyan Ghosh, Sonal Kumar, Ashish Seth, Chandra Kiran Reddy Evuru, Utkarsh Tyagi, S Sakshi, Oriol Nieto, Ramani Duraiswami, and Dinesh Manocha. Gama: A large audio-language model with advanced audio understanding and complex reasoning abilities. In Empirical Methods in Natural Language Processing, pages 6288-6313, 2024.", + "Yuan Gong, Alexander H Liu, Hongyin Luo, Leonid Karlinsky, and James Glass. Joint audio and speech understanding. In Automatic Speech Recognition and Understanding Workshop (ASRU), pages 1-8, 2023a.", + "Yuan Gong, Hongyin Luo, Alexander H Liu, Leonid Karlinsky, and James Glass. Listen, think, and understand. arXiv preprint arXiv:2305.10790, 2023b.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. Mammoth-vl: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237, 2024.", + "Songhao Han, Wei Huang, Hairong Shi, Le Zhuo, Xiu Su, Shifeng Zhang, Xu Zhou, Xiaojuan Qi, Yue Liao, and Si Liu. Videospresso: A large-scale chain-of-thought dataset for fine-grained video reasoning via core frame selection. arXiv preprint arXiv:2411.14794, 2024.", + "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.", + "Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.", + "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "Feihu Jin, Yifan Liu, and Ying Tan. Zero-shot chain-of-thought reasoning guided by evolutionary algorithms in large language models. arXiv preprint arXiv:2402.05376, 2024.", + "Chris Dongjoo Kim, Byeongchang Kim, Hyunmin Lee, and Gunhee Kim. Audiocaps: Generating captions for audios in the wild. In *Nations of the Americas* Chapter of the Association for Computational Linguistics (NAACL), pages 119-132, 2019.", + "Zhifeng Kong, Arushi Goel, Rohan Badlani, Wei Ping, Rafael Valle, and Bryan Catanzaro. Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities. In International Conference on Machine Learning (ICML), pages 25125-25148, 2024." 
+ ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.", + "Shansong Liu, Atin Sakkeer Hussain, Chenshuo Sun, and Ying Shan. Music understanding llama: Advancing text-to-music generation with question answering and captioning. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 286-290, 2024b.", + "Ziyang Ma, Mingjie Chen, Hezhao Zhang, Zhisheng Zheng, Wenxi Chen, Xiquan Li, Jiaxin Ye, Xie Chen, and Thomas Hain. Emobox: Multilingual multi-corpus speech emotion recognition toolkit and benchmark. arXiv preprint arXiv:2406.07162, 2024.", + "Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025.", + "Jan Melechovsky, Zixun Guo, Deepanway Ghosal, Navonil Majumder, Dorien Herremans, and Soujanya Poria. Mustango: Toward controllable text-to-music generation. In *Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL)*, pages 8286-8309, 2024.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.", + "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5206-5210, 2015.", + "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 311-318, 2002.", + "Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. Meld: A multimodal multi-party dataset for emotion recognition in conversations. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 527-536, 2019.", + "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, et al. Toolllm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023.", + "Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International Conference on Machine Learning (ICML), pages 28492-28518, 2023.", + "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Advances in Neural Information Processing Systems (NeurIPS), pages 53728-53741, 2023.", + "S Sakshi, Utkarsh Tyagi, Sonal Kumar, Ashish Seth, Ramaneswaran Selvakumar, Oriol Nieto, Ramani Duraiswami, Sreyan Ghosh, and Dinesh Manocha. Mmau: A massive multi-task audio understanding and reasoning benchmark. 
In International Conference on Learning Representations (ICLR), 2025.",
Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 24824-24837, 2022.", + "Peng Wen, Teng-Gen Hu, Robert J Linhardt, Sen-Tai Liao, Hong Wu, and Yu-Xiao Zou. Mulberry: A review of bioactive compounds and advanced processing technology. Trends in food science & technology, 83:138-158, 2019.", + "Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. In International Conference on Machine Learning (ICML), pages 53366-53397, 2024.", + "Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024a.", + "Zhifei Xie and Changqiao Wu. Mini-omni2: Towards open-source gpt-4o with vision, speech and duplex capabilities. arXiv preprint arXiv:2410.11190, 2024b.", + "Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024a.", + "Qian Yang, Jin Xu, Wenrui Liu, Yunfei Chu, Ziyue Jiang, Xiaohuan Zhou, Yichong Leng, Yuanjun Lv, Zhou Zhao, Chang Zhou, et al. Air-bench: Benchmarking large audio-language models via generative comprehension. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 1979-1998, 2024b.", + "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 11809-11822, 2023.", + "Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. Token-level direct preference optimization. In International Conference on Machine Learning (ICML), pages 58348-58365, 2024.", + "Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. arXiv preprint arXiv:2305.11000, 2023a.", + "Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. In Empirical Methods in Natural Language Processing (EMNLP), pages 15757-15773, 2023b.", + "Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024a.", + "Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024b.", + "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola. Automatic chain of thought prompting in large language models. arXiv preprint arXiv:2210.03493, 2022.", + "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 
Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024a.",
The audio presents a sustained, high-frequency static noise, characteristic of a detuned or malfunctioning electronic device, likely a television or radio... Describe the characteristics of the static noise in the audio, and how these characteristics change over time. What...? ",
similar to that of a detuned television or a device failing to receive a signal.", + "bbox": [ + 197, + 436, + 797, + 465 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 465, + 294, + 478 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 479, + 312, + 491 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "1. Identify changes in Intensity (Volume): The audio's static noise does not remain at a constant volume. There are noticeable increases and decreases in loudness throughout the clip. .... is struggling to maintain a consistent output, adding to the impression of something malfunctioning or broken.", + "bbox": [ + 197, + 493, + 797, + 547 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 547, + 316, + 560 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 561, + 302, + 574 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The static noise in the audio is highly dynamic. .... leading to a sense of disorder and instability.", + "bbox": [ + 197, + 575, + 797, + 603 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 604, + 305, + 616 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 617, + 274, + 628 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 631, + 300, + 643 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The audio presents a static noise, .... The overall effect is one of energetic chaos, preventing any possibility of calm or predictability.", + "bbox": [ + 197, + 643, + 797, + 672 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 672, + 303, + 684 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Note that you have both the question and the answer because it is necessary to ensure the correctness of the chain of thought. However, in your response, you can only refer to the content of the question and the audio, and lead to the answer. You must absolutely not assume that you already know the answer. Please provide a detailed and flexible response with high-quality logic in both the caption and reasoning sections. If the reasoning part requires complex logic, you can even propose several different approaches and try them one by one. Here is the original description: *** caption here ***.", + "bbox": [ + 197, + 699, + 799, + 795 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The question is: *** question here ***.", + "bbox": [ + 197, + 796, + 462, + 809 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The answer you can refer to : *** answer here ***.", + "bbox": [ + 197, + 810, + 540, + 823 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Again, don't mention that you have the answer and the description because they are only here to help you to design the chain of thought but should not exist in the real-world scenario, either in the think or response sections.", + "bbox": [ + 197, + 824, + 799, + 864 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.3 Prompt of Stage 3 when Processing Data (Sample from AudioSet)", + "text_level": 1, + "bbox": [ + 171, + 90, + 671, + 106 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We are data reviewers. 
Next, you will receive an audio clip, along with its description, questions, answers, and, most importantly, the thought process for solving the problems. Please determine and analyze whether all of these elements are completely correct; in particular, check whether there are any hallucinations in the thought process. Return if there are no issues, and if there are errors in the data.",
The model was then tasked with extracting specific target sounds from the composite audio, enhancing its ability to analyze overlapping audio sources.", + "bbox": [ + 169, + 502, + 823, + 627 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "This dataset ensures that the model is well-equipped to handle real-world audio challenges, such as distinguishing between multiple concurrent sounds and identifying temporal patterns.", + "bbox": [ + 169, + 632, + 823, + 662 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To better understand what Multi-Speaker dataset and Complex Audio dataset are respectively tasked with, we provide some examples in Sec. B.2, Sec. B.3.", + "bbox": [ + 169, + 666, + 823, + 696 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "B.2 Sample from Complex Audio Dataset", + "text_level": 1, + "bbox": [ + 171, + 90, + 475, + 106 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "", + "" + ], + "bbox": [ + 196, + 119, + 302, + 147 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The question is about identifying the sounds in the audio and arranging them in the correct order. I will carefully listen to the audio multiple times, noting down the sounds as I recognize them. Then, I will compare the identified sound sequence with the provided answer options to choose the one that matches best.", + "bbox": [ + 194, + 148, + 799, + 202 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 203, + 307, + 215 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Upon initial listening, I can discern sounds that seem like a shattering noise, an animal sound which I think sounds like a cow, a cat sound, and what sounds like some mechanical tools working, perhaps a chainsaw. I shall listen again to clarify the sounds and their order.", + "bbox": [ + 196, + 217, + 800, + 271 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 272, + 294, + 284 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "1. After relistening, the first sound is definitely glass breaking.", + "bbox": [ + 196, + 285, + 718, + 300 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. The second sound is definitely a cow moing.", + "3. The third sound is a cat meowing.", + "4. The fourth sound is a chainsaw.", + "5. I compare this sequence (glass break, cow, cat, chainsaw) with the options provided.", + "6. Option (a) matches the order I identified.", + "", + "Based on the detailed listening and analysis, the correct order of sounds in the audio is: glass break, cow, cat, chainsaw. This matches option (a).", + "", + "", + "", + "The answer is a.", + "" + ], + "bbox": [ + 196, + 300, + 797, + 478 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B.3 Sample from Multi-Speaker Dataset", + "text_level": 1, + "bbox": [ + 171, + 90, + 468, + 106 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 119, + 271, + 133 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 133, + 302, + 147 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The user asks me to identify the number of speakers in the audio clip. 
To do this, I will listen carefully and try to distinguish unique voices. The conversation is between two hikers; I need to confirm this by identifying their individual lines and vocal characteristics. Finally, I will provide the answer based on the number of identified speakers.",
The cautious speaker has a measured, worried tone, with a lower pitch when warning of danger.", + "bbox": [ + 194, + 575, + 800, + 617 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Confirming Speaker Count: A review of the audio shows no extra voices or interruptions.", + "bbox": [ + 194, + 617, + 800, + 630 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The conversation moves smoothly between the two speakers, indicating only two are present.", + "bbox": [ + 194, + 631, + 800, + 643 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Final Answer: Based on this analysis, there are two individuals in the conversation.", + "bbox": [ + 194, + 645, + 743, + 657 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Thus, the answer is B.", + "bbox": [ + 196, + 657, + 344, + 670 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 671, + 316, + 684 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 685, + 302, + 698 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Based on the dialogue and vocal tone analysis, there are two distinct speakers in the audio.", + "bbox": [ + 194, + 699, + 800, + 713 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The answer is B.", + "bbox": [ + 196, + 713, + 310, + 726 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 727, + 307, + 739 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 739, + 274, + 753 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 753, + 300, + 767 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Based on the dialogue and distinct vocal tones present in the audio, I have identified two individuals engaged in the conversation. Therefore, the answer is B.", + "bbox": [ + 194, + 768, + 800, + 795 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "", + "bbox": [ + 196, + 796, + 305, + 808 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/2c7cf6efc88d700632067417e48e5a03b2a49288065af2afae5640b0f1376a7a.jpg", + "image_caption": [ + "C Further Dataset Analysis", + "Figure 6: Detailed information of the CoTA dataset: The maximum, minimum, and average number of words in the answers, as well as the average length of the audio." + ], + "image_footnote": [], + "bbox": [ + 173, + 122, + 823, + 345 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "CoTA's reasoning adaptability is also evident in its distribution of token lengths and audio duration across tasks in Figure 6. The dataset accommodates a broad range of reasoning complexities, with token lengths spanning 327 (CoVoST2) to 1,454 (Multi-Speaker), ensuring coverage of both concise and highly intricate reasoning processes. Notably, tasks requiring deep logical inference, such as complex audio, exhibit a well-balanced token distribution $(\\max = 423, \\text{avg} = 192.96)$ , supporting structured multi-step reasoning without unnecessary redundancy.", + "bbox": [ + 169, + 398, + 823, + 482 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Further, the dataset's average token lengths (164.48-481.57) align with task difficulty: longer reasoning chains characterize tasks such as sound description (AudioSet: 395.26) and music understanding (MusicBench: 463.89). 
Meanwhile, CoTA ensures practical generalization by maintaining audio durations between 2.84s and 26.34s, where shorter clips (e.g., MELD: 2.84s) support concise, context-dependent reasoning, while extended sequences (Multi-Speaker: 26.34s) enable complex multi-turn inference. This systematic variation in reasoning depth and audio granularity supports adaptability across diverse tasks, addressing the limitation of one-size-fits-all reasoning chains in existing audio datasets.",
Slap bass is used in funk music. Given these considerations, several potential influences and subgenres present themselves. The blues and rock elements of the chord progression could indicate the influence of blues rock and hard rock, which often incorporate blues-based progressions with louder, more aggressive instrumentation. The slap bassline suggests a funk influence, which can be found in funk rock and other rock subgenres that incorporate funk elements. The overall sonic character is most indicative of rock, with potential cross-pollination from blues and funk.", + "bbox": [ + 269, + 407, + 720, + 529 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Based on the chord progression of Bb, Ab6, F, Ab6, the prominent electric guitar solo with overdrive, the acoustic rhythm guitars, the slap-style bass, and the acoustic drums, the music has potential influences or subgenres within rock music. These may include blues rock, hard rock, and funk rock.", + "bbox": [ + 269, + 540, + 715, + 584 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 585, + 292, + 595 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 597, + 320, + 606 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Considering the instrumentation, the chord progression (Bb, Ab6, F, Ab6), and the overall sonic character of the piece, this instrumental track may draw upon influences from several subgenres within rock music. The chord progression has blues and rock similarities, pointing towards blues-rock and hard-rock elements. Moreover, the slap bassline indicates the touch of funk.", + "bbox": [ + 269, + 607, + 723, + 660 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 662, + 323, + 672 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/a36cb05d818d194a4e6d2b64a41064f13fcd8e3fecc09d832b319f34886fd1c0.jpg", + "image_caption": [ + "User" + ], + "image_footnote": [], + "bbox": [ + 197, + 215, + 240, + 250 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Considering the sounds present in the audio, describe the likely environment in which the dog is located and detail the types of interactions or activities the dog might be engaged in.", + "bbox": [ + 285, + 222, + 750, + 262 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/6656392ff0178a74a688308bf440905dfacb28c80a2e0b33f331b3730e202032.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 263, + 460, + 287 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 320, + 287, + 329 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The question asks about the dog's environment and activities based on the audio. I need to carefully analyze the audio to identify relevant sounds, infer the setting, and deduce potential interactions.", + "bbox": [ + 267, + 330, + 709, + 364 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The audio contains dog barks of varying intensity and pitch. There are also sounds suggesting movement, such as rustling or the distinct sounds of the dog moving on what sounds to be hard floor surfaces. These movement sounds occur both during and between the barking, with the barks themselves being the main focus of sound. 
The pitch of the bark changes at different points, from higher to lower and back again.", + "bbox": [ + 267, + 375, + 723, + 441 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 269, + 452, + 357, + 460 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Environment Inference:** The presence of hard floor surface sounds, coupled with the absence of specific outdoor environmental noises (e.g., wind, traffic, birdsong), leans towards an indoor location. The changing pitch and tone of barks show the arousal level is rapidly changing.", + "2. **Activity Inference:** The barking, interspersed with the sound of movement, indicates the dog is active. The varied bark intensity could suggest excitement, playfulness, or even alert behavior in relation to other sounds.", + "3. **Interaction Possibilities:** Based on the sounds alone, it's difficult to pinpoint the exact interaction. It could be playing with a toy, reacting to someone entering the room, or engaging in a game." + ], + "bbox": [ + 267, + 463, + 720, + 573 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "\\nThe dog is likely indoors on a hard floor surface. The audio suggests the dog is actively moving and barking, possibly engaged in play or reacting to something in its immediate surroundings. The dog is probably reacting to a stimulus within its environment, resulting in a change in the dog's arousal and tone and pitch of barks.", + "bbox": [ + 267, + 584, + 720, + 638 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 640, + 290, + 648 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 651, + 320, + 660 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Based on the audio, the dog is likely located in an indoor environment with hard floor surface sounds. The varied intensity and sound of the dog's barks, combined with the sound of its movement, indicate the dog is actively moving and vocalizing, and that the arousal level of the dog is actively changing. Given this information, the dog might be engaged in some activity or play, perhaps reacting to a toy or another person within the room.", + "bbox": [ + 267, + 661, + 720, + 727 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 728, + 323, + 738 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/aff642ef84ae66fc0319f623312be2998e8ccbd4665d511a6391ba6638e6705a.jpg", + "image_caption": [ + "Figure 8: Example of Audio-Reasoner answering sound-based question.", + "Audio Reasoner" + ], + "image_footnote": [], + "bbox": [ + 754, + 303, + 795, + 337 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/250663b1591e6ae5dbf7df5a0b76757a799f877069a3d178ee425094da48c030.jpg", + "image_caption": [ + "User" + ], + "image_footnote": [], + "bbox": [ + 197, + 215, + 241, + 250 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "What did the second person say about the rose bush?", + "bbox": [ + 285, + 222, + 624, + 234 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) They like it.", + "(b) It's the bane of their existence.", + "(c) It needs more fertilizer.", + "(d) It provides good shade." 
+ ], + "bbox": [ + 287, + 236, + 498, + 287 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/18e4c56940d96a5d28d7df8cd5be561661e52ea20dcad7561fe0902a688b15f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 289, + 460, + 311 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 330, + 289, + 340 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The question asks about what the second person said about the rose bush. The audio presents a conversation between two neighbors, Amelia and Ben. I need to identify who the second speaker is and what they said about the rose bush. Then, I need to match what the second speaker said with the given options.", + "bbox": [ + 267, + 342, + 715, + 397 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The audio presents a conversation between Amelia and Ben about the upcoming block party and a troublesome rose bush.", + "bbox": [ + 267, + 407, + 712, + 430 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "**Speaker 1 (Amelia)**: Asks Ben if he's attending the block party and mentions the rose bush causing him trouble. She says \"You mentioned weedies, that rose bush still causing you trouble, the one that keeps growing over the fence?\". She also mentions she has been scratched by it.", + "bbox": [ + 267, + 430, + 717, + 474 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "**Speaker 2 (Ben)**: Confirms he's attending the block party, entering his chili in the cook-off, and complains about the rose bush. He says \"That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble.\" and suggests chopping it down. $\\backslash$ n\\`nTherefore, Ben is the second speaker.", + "bbox": [ + 267, + 474, + 717, + 529 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 267, + 540, + 357, + 550 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Identify the second speaker:** Based on the audio, Ben is the second speaker.", + "2. **Identify what Ben said about the rose bush:** Ben says \"That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble.\" and suggests chopping it down.", + "3. **Match Ben's statement with the options:**", + "- (a) They like it. - This is incorrect because Ben clearly dislikes the rose bush.", + "- (b) It's the bane of their existence. - This aligns perfectly with Ben's statement.", + "- (c) It needs more fertilizer. - This is not mentioned in the audio.", + "- (d) It provides good shade. - This is also not mentioned in the audio." + ], + "bbox": [ + 267, + 551, + 712, + 650 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Therefore, the correct answer is (b).", + "bbox": [ + 269, + 651, + 550, + 661 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "\\nBased on the audio, the second speaker, Ben, describes the rose bush as \"the bane of my existence\" and complains about it scratching him. Therefore, the answer is (b). 
", + "bbox": [ + 267, + 672, + 704, + 705 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 705, + 292, + 715 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 717, + 320, + 727 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The second person, Ben, explicitly states that the rose bush $\"is the bane of my existence\"$ . Therefore, the answer is B.", + "bbox": [ + 267, + 728, + 691, + 750 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "", + "bbox": [ + 236, + 750, + 323, + 760 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/e1fbcf9ad869804d415d3768a1bafdb4f20a43d182c0fe8e203fecf5c5920fde.jpg", + "image_caption": [ + "Figure 9: Example of Audio-Reasoner answering multi-speaker speech-based question.", + "Audio", + "Reasoner" + ], + "image_footnote": [], + "bbox": [ + 753, + 333, + 795, + 364 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/bf0107d5c8c65914f99bf9fa3e65a54865c8b756def2b36ec09a1f74a1a04e7b.jpg", + "image_caption": [ + "User" + ], + "image_footnote": [], + "bbox": [ + 197, + 215, + 240, + 250 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Given that you hear buzzing plastic, identify the background sound?", + "bbox": [ + 285, + 222, + 709, + 236 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a). A washing machine running", + "(b). Rain falling on a window", + "(c). A toilet flushing", + "(d). Waves crashing on a shore" + ], + "bbox": [ + 287, + 236, + 485, + 287 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/f73ca89ffd4040b5bfdf6ba30b94d0ae554820407dfdcdd29189fdf86508fba7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 287, + 289, + 460, + 313 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 330, + 289, + 340 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 269, + 342, + 346, + 351 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "I will carefully and methodically approach the task of identifying the background sound in the audio clip. This will allow me to focus solely on the characteristics of the background noise. Once the background sound is isolated, I will analyze it in detail to determine its key qualities, such as its consistency, rhythm, and texture. Afterward, I will compare these characteristics to the provided answer options.", + "bbox": [ + 267, + 353, + 722, + 407 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": " Upon my initial listening, I observed that the background sound possesses characteristics that suggest fluid movement. There is a continuous, almost rhythmic, sound with subtle gurgling and sloshing. This could indicate the presence of water or some other flowing liquid. As I continue to listen more carefully, the flow becomes even more evident, suggesting a consistent rush or stream-like sound. ", + "bbox": [ + 267, + 419, + 717, + 484 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 269, + 496, + 356, + 506 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. 
**Sound Characteristics:** My first observation is that the background sound features a continuous, almost rhythmic quality, with faint gurgling noises. These characteristics often point to a fluid in motion. Upon further listening, I recognize that the sound might be created by a liquid flowing from one place to another.", + "2. **Detailed Listening:** I focus on the quieter sections of the sound, where the fluid dynamics become clearer. The consistent, almost rushing quality suggests the movement of water rather than irregular drops or splashes.", + "3. **Comparison to Options:** With this in mind, I now compare the sound characteristics to the provided answer options. - \"A toilet flushing,\" which often involves a steady rush of water with gurgling and sloshing as the water flows through pipes. This closely matches the consistent flow and gurgling noises I hear in the background.", + "4. **Conclusion:** Based on my analysis, the sound best aligns with the description of a toilet flushing, due to the nature of the consistent, rushing water and subtle gurgling sounds. " + ], + "bbox": [ + 267, + 508, + 720, + 672 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 672, + 290, + 681 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 683, + 320, + 693 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "After listening to the audio carefully, isolating the background sound, and comparing it to the answer options, I concluded that the background sound most closely matches the sound of a toilet flushing. The key factors leading to this conclusion include the continuous rush of water and the gurgling quality, which are typical of a flushing toilet.", + "bbox": [ + 267, + 695, + 717, + 750 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "", + "text_level": 1, + "bbox": [ + 236, + 750, + 321, + 760 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/02e9abd6638b2b63d0c5724c13130da9bc69a42b07a4d1726ec78a20919f111b.jpg", + "image_caption": [ + "Figure 10: Example of Audio-Reasoner answering complex-audio-based question.", + "Audio", + "Reasoner" + ], + "image_footnote": [], + "bbox": [ + 753, + 333, + 795, + 364 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 26 + } +] \ No newline at end of file diff --git a/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_model.json b/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bcf2b98902aec2b50a10fd65a4be4126da90f0f4 --- /dev/null +++ b/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_model.json @@ -0,0 +1,4973 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.279, + 0.061, + 0.718 + ], + "angle": 270, + "content": "arXiv:2503.02318v2 [cs.SD] 20 Sep 2025" + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.122, + 0.818, + 0.175 + ], + "angle": 0, + "content": "Audio-Reasoner: Improving Reasoning Capability in Large Audio Language Models" + }, + { + "type": "text", + "bbox": [ + 0.338, + 0.226, + 0.66, + 0.241 + ], + "angle": 0, + "content": "Zhifei Xie\\(^{1\\dagger}\\) Mingbao Lin\\(^{3\\dagger}\\) Zihang Liu\\(^{2\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.305, + 0.241, + 0.692, + 0.256 + ], + "angle": 0, + "content": "Pengcheng Wu1 Shuicheng Yan2† Chunyan Miao1‡" + }, + 
{ + "type": "text", + "bbox": [ + 0.235, + 0.256, + 0.764, + 0.27 + ], + "angle": 0, + "content": "\\(^{1}\\)Nanyang Technological University \\(^{2}\\)National University of Singapore \\(^{3}\\)Rakuten" + }, + { + "type": "text", + "bbox": [ + 0.349, + 0.27, + 0.65, + 0.286 + ], + "angle": 0, + "content": "†Equal Contributions ‡Corresponding Authors" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.286, + 0.75, + 0.299 + ], + "angle": 0, + "content": "zhifei001@e.ntu.edu.sg linmb001@outlook.com liuzihang99@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.301, + 0.727, + 0.313 + ], + "angle": 0, + "content": "pengchengwu@ntu.edu.sg yansc@nus.edu.sg ascymiao@ntu.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.295, + 0.314, + 0.704, + 0.327 + ], + "angle": 0, + "content": "Project: https://github.com/xzf-thu/Audio-Reasoner" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.362, + 0.538, + 0.377 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.39, + 0.769, + 0.557 + ], + "angle": 0, + "content": "Recent advancements in multimodal reasoning have largely overlooked the audio modality. We introduce Audio-Reasoner, a large-scale audio language model for deep reasoning in audio tasks. We curate a diverse collection of multi-task audio datasets with simple annotations, refining them through structured secondary labeling and complex question generation. Additionally, We utilize advanced closed-source models to generate structured reasoning chains, transforming raw annotations into a formatted inference process. Following inference scaling principles, we train Audio-Reasoner on CoTA, a high-quality reasoning dataset with 1.2 million reasoning-rich samples. Experiments show state-of-the-art performance across key benchmarks, including MMAU-mini \\((+25.42\\%)\\), AIR-Bench chat/foundation \\((+14.57\\% / + 10.13\\%)\\), and MELD \\((+8.01\\%)\\). Our findings stress the core of structured CoT training in advancing audio reasoning." + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.582, + 0.812, + 0.835 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.848, + 0.825, + 0.892 + ], + "angle": 0, + "content": "Figure 1: Benchmark performance of Audio-Reasoner on tasks of speech emotion recognition (SER), speech-to-text translation (S2TT), MMAU, and AIR-Bench chat. S2TT is measured using BLEU while SER and MMAU are measured ACC (left axis). AIR-Bench is scored by GPT (right axis)." + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.275, + 0.937 + ], + "angle": 0, + "content": "Technical report." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.313, + 0.106 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.114, + 0.827, + 0.309 + ], + "angle": 0, + "content": "Recent advancements in large language models (LLMs) have significantly enhanced their reasoning capabilities, driven by innovations like chain-of-thought (CoT) and inference scaling. Models such as OpenAI's o1 [Jaech et al., 2024] and Deepseek-R1 [Guo et al., 2025] have demonstrated significant improvements, especially in tasks like mathematics and coding [Team et al., 2025, Zhao et al., 2024a, Muennighoff et al., 2025, Liu et al., 2024a, Zhang et al., 2024b, Deng et al., 2024, Hui et al., 2024, Yang et al., 2024a]. These models have set a precedent for achieving \"deep thinking\" by tackling complex, structured tasks. 
The CoT framework, in particular, has been successfully applied to multimodal models, improving their cognitive abilities in image and video reasoning. Models such as Visual-CoT [Shao et al., 2024a], LLaVA-Reasoner [Zhang et al., 2024a], and MAmmoTH-VL [Guo et al., 2024] have demonstrated that large-scale datasets and multidimensional reasoning can enhance model performance, even in tasks involving images and videos [Zou et al., 2023]. Other works, like Mulberry [Wen et al., 2019] and Image-of-Thought [Zhou et al., 2024], integrate reflective and image-editing tools to further refine multimodal reasoning, offering new pathways for understanding complex queries across multiple modalities." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.315, + 0.828, + 0.483 + ], + "angle": 0, + "content": "However, the application of CoT in the audio modality has been largely underexplored. Although models like Audio Flamingo [Kong et al., 2024], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024] have pushed the boundaries of large audio language models (LALMs), these advancements have not yet incorporated CoT reasoning at scale. Recent research [Ma et al., 2025] suggests that existing CoT methods, such as zero-shot reasoning in audio tasks, fail to significantly improve performance on more complex queries. This limitation is largely attributed to the simplicity of existing audio datasets—such as AudioSet [Gemmeke et al., 2017], AudioCaps [Kim et al., 2019], and Clotho [Drossos et al., 2020]—which predominantly feature short, simple labels. These simplified datasets hinder the development of LALMs capable of more intricate reasoning. Without richer, more complex data, these models struggle with long-form reasoning, and the application of CoT often leads to severe hallucinations and degraded performance. Therefore, advancing CoT in LALMs necessitates overcoming these dataset limitations to allow for more effective, deep reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.489, + 0.827, + 0.683 + ], + "angle": 0, + "content": "To address the challenges in audio-based reasoning, we propose a scalable and effective approach to generating high-quality pretraining data. Using state-of-the-art commercial models, we introduce CoTA, a large-scale dataset containing 1.2 million refined captions and question-answer (QA) pairs. CoTA spans multiple datasets and tasks, undergoing rigorous filtering to ensure diversity and quality. Building on CoTA, we develop Audio-Reasoner, a large audio language model designed for long-context reasoning. Audio-Reasoner is trained with a 4K token context window and generates structured CoT reasoning whose length can exceed 1K tokens in real-world tasks. The model is fine-tuned on CoTA using supervised fine-tuning, adhering to a structured reasoning framework, as illustrated in Figure 2: (1) Planning—Identifies key problem components from the user query and formulates a structured sequence of reasoning steps essential for deriving an answer. (2) Caption—Extracts and integrates relevant multimodal content from the input to enrich the reasoning process. (3) Reasoning—Executes a systematic, step-by-step reasoning procedure to ensure logical coherence and accuracy. (4) Summary—Synthesizes the reasoning process into a final response that is concise, contextually grounded, and precise."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.689, + 0.828, + 0.842 + ], + "angle": 0, + "content": "Our experimental results, partially presented in Figure 1, demonstrate the effectiveness of Audio-Reasoner. More comprehensively, we evaluate the model across multiple benchmarks: MMAU-mini [Sakshi et al., 2024]: A dataset with 1,500 closed-choice questions testing reasoning across sound, speech, and music. AIR-Bench [Yang et al., 2024b]: Various types of audio signals including human speech, natural sounds, and music. CoVoST2(zh-en) [Wang et al., 2021]: Speech-to-text translation in Chinese and English. MELD [Poria et al., 2019]: Emotion classification. Compared to Qwen2-Audio-Instruct [Chu et al., 2024], Audio-Reasoner achieves: \\(+25.4\\%\\) improvement on MMAU-mini with reasoning subtask gains: \\(+44.4\\%\\), \\(+26.1\\%\\), and \\(+9.3\\%\\); \\(+14.6\\%\\) gains on AIR-Bench chat; \\(+30.6\\%\\) on CoVoST2(ZN/EN subset, Average BLEU score.); \\(+8.01\\%\\) on MELD. These results validate the effectiveness of our approach in advancing long-context reasoning and inference scaling for audio models." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.849, + 0.573, + 0.864 + ], + "angle": 0, + "content": "The major contributions we have made in this paper include:" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "- We propose Audio-Reasoner, designed for deep reasoning and inference scaling in the audio modality. Built upon Qwen2-Audio and fine-tuned with structured CoT training, Audio-Reasoner significantly improves long-context reasoning across diverse audio tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "- We develop CoTA, a large-scale dataset with 1.2 million high-quality captions and QA pairs, spanning multiple audio domains. The dataset enables structured reasoning and enhances audio-language pretraining." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.144, + 0.825, + 0.187 + ], + "angle": 0, + "content": "- We introduce a scalable data generation pipeline leveraging advanced commercial models to produce complex reasoning-based QA pairs and structured CoT annotations, enriching model training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.827, + 0.224 + ], + "angle": 0, + "content": "- We achieve state-of-the-art performance, with \\(+25.4\\%\\) gains over Qwen2-Audio-Instruct on MMAU-mini, along with significant improvements in reasoning, translation, and emotion recognition tasks." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.241, + 0.323, + 0.256 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.268, + 0.827, + 0.448 + ], + "angle": 0, + "content": "Chain-of-Thought Reasoning. LLMs leverage in-context learning (ICL) to enhance their reasoning capabilities by processing prompts and context. This is further strengthened through CoT techniques. Various CoT methods have been explored, including Tree of Thoughts (TOT) [Yao et al., 2023], manual few-shot CoT [Wei et al., 2022], and automatically generated approaches [Jin et al., 2024, Zhang et al., 2022]. 
In addition, studies have delved into the necessity of CoT, its theoretical foundations, and its effectiveness across a wide range of tasks [Sprague et al., 2024, Stechly et al., 2024, Turpin et al., 2023]. The release of OpenAI's o1 model [Jaech et al., 2024] has sparked renewed interest in CoT research, significantly boosting the capabilities of LLMs, especially in multi-step reasoning tasks such as coding [Zhang et al., 2024b] and mathematics [Yang et al., 2024a], setting new performance benchmarks. CoT techniques have been integrated with other methods such as Monte Carlo Tree Search [Browne et al., 2012], reflection [Guo et al., 2025], and tool use [Qin et al., 2023], and are often trained alongside reinforcement learning approaches [Rafailov et al., 2023, Zeng et al., 2024, Engstrom et al., 2020, Shao et al., 2024b]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.827, + 0.61 + ], + "angle": 0, + "content": "Multimodal Chain-of-Thought. CoT techniques have also been explored in the realm of multimodal large models. For example, Visual-CoT [Shao et al., 2024a] incorporates object detection to aid in reasoning, and LLaVA-Reasoner [Zhang et al., 2024a] uses closed-source models for CoT fine-tuning through recaptioning. LLaVA-CoT [Xu et al., 2024] and MAmmoTH-VL [Guo et al., 2024] scale datasets to improve model performance. Other models, such as Mulberry [Wen et al., 2019], explore the application of reflective thinking, and Image-of-Thought [Zhou et al., 2024] integrates image editing tools to enhance reasoning. Video-related studies [Wang et al., 2024, Han et al., 2024, Fei et al., 2024, Tang et al., 2024] have demonstrated the effectiveness of CoT in reasoning tasks within the video domain. However, the application of CoT in the audio domain is still in its infancy. The study Audio-CoT [Ma et al., 2025] shows some improvement with zero-shot CoT in audio tasks, but it falls short on more complex problems. This paper aims to explore this gap further." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.62, + 0.827, + 0.773 + ], + "angle": 0, + "content": "Large Audio Language Models. LALMs can be broadly categorized into two areas: audio understanding and real-time dialogue. Audio understanding models typically consist of a three-layer architecture—an encoder, a connector, and an LLM—focusing on specific domains, as seen in models like Mu-LLaMA [Liu et al., 2024b], LTU [Gong et al., 2023b], EmoBox [Ma et al., 2024], and GAMA [Ghosh et al., 2024]. Other models, such as LTU-AS [Gong et al., 2023a], SALMONN [Tang et al., 2023] and Qwen2-Audio [Chu et al., 2024], employ unified architectures designed for multi-task training. Real-time conversation models, which focus on speech input and extend transformers to real-time speech synthesis, are also gaining popularity [Zhang et al., 2023b, Xie and Wu, 2024a,b, Fu et al., 2025, Defossez et al., 2024]. However, despite their focus on understanding and rapid response, current LALMs still lack significant exploration into reasoning tasks, such as CoT. This paper addresses this gap by investigating the application of CoT in LALMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.789, + 0.342, + 0.804 + ], + "angle": 0, + "content": "3 Audio-Reasoner" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In this section, we present the training methodology for our Audio-Reasoner model, designed to equip LALMs with structured, step-by-step reasoning in multimodal tasks.
Unlike standard models, Audio-Reasoner generates both a chain-of-thought (CoT) and final response, ensuring logical consistency and interpretability. We describe the training framework, which includes a structured CoT approach, a novel data generation pipeline, and the large-scale CoTA dataset. This dataset is carefully constructed through automated annotation, reasoning chain creation, and validation, setting the foundation for robust and explainable audio-based reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.09, + 0.824, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.382, + 0.828, + 0.44 + ], + "angle": 0, + "content": "Figure 2: Comparison between Audio-Reasoner and Qwen2-Audio-Instruct: While Qwen2-Audio-Instruct produces brief and error-prone reasoning, our Audio-Reasoner uses a structured reasoning framework with distinct \"thinking\" and \"response\" phases, ensuring a more reliable and well-supported output through careful planning, information extraction, and step-by-step reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.466, + 0.48, + 0.481 + ], + "angle": 0, + "content": "3.1 Model Training with Audio Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.486, + 0.825, + 0.514 + ], + "angle": 0, + "content": "A standard large language model is trained to generate an output sequence \\( Y \\) given an input sequence \\( X \\). The probability distribution of the model's output is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.425, + 0.523, + 0.826, + 0.54 + ], + "angle": 0, + "content": "\\[\nP (Y \\mid X; \\theta) = f _ {\\theta} (X), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.825, + 0.577 + ], + "angle": 0, + "content": "where \\( f_{\\theta} \\) is a Transformer-based model parameterized by \\( \\theta \\). The training objective follows a maximum likelihood estimation framework:" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.584, + 0.826, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\theta) = - \\sum_ {i = 1} ^ {N} \\log P \\left(Y _ {i} \\mid X _ {i}; \\theta\\right). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.825, + 0.671 + ], + "angle": 0, + "content": "In our Audio-Reasoner, the input consists of an audio signal \\( A \\) and a text-based query \\( Q \\), forming the multimodal input representation:" + }, + { + "type": "equation", + "bbox": [ + 0.453, + 0.673, + 0.826, + 0.69 + ], + "angle": 0, + "content": "\\[\nX = (A, Q). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.825, + 0.762 + ], + "angle": 0, + "content": "Unlike conventional LLMs, where the output is a single response, we structure the model's output into two distinct components: the chain of thought reasoning \\( C \\), which captures the step-by-step logical process, and the final response \\( R \\), which provides the ultimate answer. The model thus learns to generate the concatenation of \\( C \\) and \\( R \\), leading to the probability distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.398, + 0.77, + 0.826, + 0.786 + ], + "angle": 0, + "content": "\\[\nP (C, R \\mid A, Q; \\theta) = f _ {\\theta} (A, Q). 
\\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.804, + 0.825, + 0.831 + ], + "angle": 0, + "content": "To ensure explicit learning of both reasoning and final response generation, we construct a dataset defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.833, + 0.826, + 0.851 + ], + "angle": 0, + "content": "\\[\n\\mathcal {D} = \\left\\{\\left(A _ {i}, Q _ {i}, C _ {i}, R _ {i}\\right) \\right\\} _ {i = 1} ^ {N}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "where each training sample consists of an input audio signal \\( A_{i} \\), its corresponding textual query \\( Q_{i} \\), the structured reasoning process \\( C_{i} \\), and the final answer \\( R_{i} \\). This dataset formulation reinforces the model's ability to perform in-context learning and deep reasoning, ensuring that generated responses are not only accurate but also logically structured." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "The training objective maximizes the likelihood of both \\( C \\) and \\( R \\), encouraging the model to first reason and then generate a response. The loss function is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.128, + 0.826, + 0.169 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} (\\theta) = - \\sum_ {i = 1} ^ {N} \\log P \\left(C _ {i}, R _ {i} \\mid A _ {i}, Q _ {i}; \\theta\\right). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.181, + 0.825, + 0.224 + ], + "angle": 0, + "content": "By optimizing this objective, Audio-Reasoner is trained to articulate a structured reasoning process before providing its final response. This approach enhances interpretability, reliability, and alignment with human reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.229, + 0.827, + 0.342 + ], + "angle": 0, + "content": "At inference-time, our Audio-Reasoner follows a structured reasoning pipeline, as illustrated in Figure 2. The reasoning process consists of four sequential steps: (1) Planning \\((P)\\): The model analyzes the query, identifies key problem components, and outlines the reasoning steps necessary to derive an answer. (2) Captioning \\((C)\\): Relevant multimodal content is extracted from the input, such as speech transcription, acoustic event detection, or context information. (3) Reasoning \\((R)\\): Based on the extracted content, the model performs structured, step-by-step reasoning. (4) Summary \\((S)\\): The model synthesizes its reasoning process into a final, concise, and accurate response. This structured inference process can be formalized as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.346, + 0.826, + 0.363 + ], + "angle": 0, + "content": "\\[\nP \\sim f _ {\\theta} (A, Q), \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.418, + 0.365, + 0.826, + 0.38 + ], + "angle": 0, + "content": "\\[\nC \\sim f _ {\\theta} (A, Q, P), \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.419, + 0.382, + 0.826, + 0.398 + ], + "angle": 0, + "content": "\\[\nR \\sim f _ {\\theta} (A, Q, P, C), \\tag {9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.4, + 0.826, + 0.415 + ], + "angle": 0, + "content": "\\[\nS \\sim f _ {\\theta} (A, Q, P, C, R). 
\\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.429, + 0.83, + 0.5 + ], + "angle": 0, + "content": "Compared to the direct-response counterpart [Chu et al., 2024], this approach provides two key advantages: Improved Interpretability—By explicitly modeling each reasoning step, the process becomes more transparent, making it easier to analyze and diagnose errors. Reduced Hallucinations—The structured reasoning pipeline mitigates speculative or incorrect responses, ensuring that outputs remain logically grounded." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.828, + 0.587 + ], + "angle": 0, + "content": "Figure 2 illustrates the structured CoT reasoning process, highlighting how each stage contributes to the final response. This approach draws inspiration from recent advancements in symbolic reasoning and CoT training [Cui et al., 2024], which emphasize that zero-shot reasoning without training is less effective. Moreover, previous studies have shown that models tuned on native CoT data significantly outperform those trained on generic labels, especially in multimodal reasoning tasks [Guo et al., 2024, Wen et al., 2019]." + }, + { + "type": "table_caption", + "bbox": [ + 0.296, + 0.598, + 0.7, + 0.612 + ], + "angle": 0, + "content": "Table 1: Domains and tasks of our constructed CoTA dataset." + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.617, + 0.689, + 0.732 + ], + "angle": 0, + "content": "
Domain | Task | Description
Sound | Sound QA | Sound question answering
Speech | Speech QA | Speech question answering
Speech | SER | Speaker emotion recognition
Speech | S2TT | Speech to text translation
Music | Music QA | Music question answering
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.748, + 0.564, + 0.764 + ], + "angle": 0, + "content": "3.2 Systematic Data Preparation for Audio Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.827, + 0.85 + ], + "angle": 0, + "content": "Training the Audio-Reasoner model requires a high-quality, diverse, and multitask audio-based reasoning dataset. Our goal is to develop a scalable and effective data generation method that systematically transforms raw audio data and simple human-labeled annotations into structured reasoning tasks. The resulting CoTA dataset with 1.2 million samples, focusing on complex reasoning-based question-answering tasks, spans three domains—audio, speech, and music—as detailed in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.828, + 0.914 + ], + "angle": 0, + "content": "To achieve this, we design a structured data generation pipeline consisting of three key stages: (1) generating high-quality annotations and diverse questions, (2) constructing structured reasoning chains, and (3) performing comprehensive validation. The complete pipeline is illustrated in Figure 3. The following sections describe each stage in detail." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.09, + 0.825, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.362, + 0.651, + 0.379 + ], + "angle": 0, + "content": "Figure 3: Multistage data generation pipeline." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.403, + 0.482, + 0.418 + ], + "angle": 0, + "content": "3.2.1 Multistage Data Generation Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.42, + 0.827, + 0.56 + ], + "angle": 0, + "content": "Stage 1: Automated Annotation and Question-Answer Synthesis. We begin by using advanced external closed-source models to improve the existing simple human annotations into high-quality and logically coherent training data. While large language models can produce hallucinations in free-form generation, they are highly effective at structured, evidence-based content creation. We leverage this strength by instructing the model to sequentially describe the audio elements, enhancing its understanding of sound sources and speech context. Based on these enriched descriptions, the model generates a diverse set of questions, ranging from straightforward factual queries to complex logical reasoning problems. This process ensures that the dataset captures a broad spectrum of reasoning tasks, as summarized in Table 1. Besides, in Sec. A.1 we provide the prompt in deducing closed-source models to generate questions and corresponding answers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.828, + 0.678 + ], + "angle": 0, + "content": "Stage 2: Structured Reasoning Chain Construction. Next, we transform the generated question-answer pairs into structured reasoning chains. Given the limited development of CoT methodologies in the audio domain, we adopt a systematic approach to ensure inference stability. The model first plans and analyzes the questions, extracts key information from the captions, and formulates logical steps leading to the answer. To facilitate structured reasoning, we employ explicit step demarcations such as and , allowing the model to autonomously construct multi-step inference pathways. Sec. 
A.2 describes the prompt used for the structured reasoning chain construction process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.829, + 0.755 + ], + "angle": 0, + "content": "Stage 3: Quality Assurance and Dataset Validation. Finally, we subject the generated data to a rigorous review process. Using the raw audio input, Stage 1 annotations, and Stage 2 reasoning chains, the model assesses whether the generated content is accurate, coherent, and suitable for inclusion in the final dataset. This step ensures the overall quality and reliability of the CoTA dataset. Sec. A.3 illustrates the prompt used for filtering low-quality content." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.762, + 0.634, + 0.777 + ], + "angle": 0, + "content": "3.2.2 Task Taxonomy: Categories and Reasoning Methodologies" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.779, + 0.825, + 0.809 + ], + "angle": 0, + "content": "The CoTA dataset encompasses a range of reasoning-based tasks, each requiring a distinct reasoning path that the model must grasp. These include:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.828, + 0.913 + ], + "angle": 0, + "content": "(1) Sound-Based Question Answering: The model identifies and analyzes sound characteristics, contextualizing them within the user's query to derive a reasoned response. (2) Speech-Based Question Answering: The model recognizes speaker timbres, transcribes speech content, and incrementally processes the question to determine the appropriate answer. (3) Speech Emotion Recognition (SER) and Speech-to-Text Translation (S2TT): These specialized tasks require the model to integrate speech recognition with emotion analysis and language translation, forming a structured reasoning process. (4) Music-Based Question Answering: As music is highly abstract, the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.506, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.088, + 0.825, + 0.13 + ], + "angle": 0, + "content": "Table 2: Composition of our CoTA Dataset. We use Google Gemini [Team et al., 2024] to build the reasoning ability in CoTA. Note that the Multi-Speaker and Complex Audio datasets are manually synthesized; details are provided in Sec. B." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.138, + 0.825, + 0.317 + ], + "angle": 0, + "content": "
Category | Dataset Source | Main Skills Learning | Model Used | Quantity | Percentage | Synthetic
Speech | Multi-Speaker | Multi-speaker Speech QA | gemini-2.0-flash | 117.4k | 12.09% | Yes
Speech | MELD [Poria et al., 2019] | Speech Emotion QA | gemini-2.0-pro-exp | 29.2k | 3.01% | No
Speech | CoVoST2 [Wang et al., 2021] | Speech-to-Text Translation | gemini-2.0-flash | 224.6k | 23.13% | No
Speech | Mustard [Castro et al., 2019] | Sarcasm Detection | gemini-2.0-pro-exp | 1k | 0.10% | No
Music | MusicBench [Melechovsky et al., 2024] | Music QA | gemini-2.0-flash | 137.1k | 14.12% | No
Sound | AudioSet [Gemmeke et al., 2017] | Sound QA | gemini-2.0-flash | 315.2k | 32.46% | No
Sound | Clotho [Drossos et al., 2020] | Sound QA | gemini-2.0-pro-exp | 9.3k | 0.93% | No
Sound | AudioCaps [Kim et al., 2019] | Sound QA | gemini-2.0-flash | 117.5k | 12.10% | No
Sound | Complex Audio | Complex Audio QA | gemini-2.0-flash | 20k | 2.06% | Yes
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.343, + 0.825, + 0.388 + ], + "angle": 0, + "content": "model first analyzes fundamental attributes such as tonality, tempo, and emotion before progressing to genre classification and deeper inferential reasoning based on the user's query. A comprehensive breakdown of the dataset's task categories and reasoning pathways is provided in Table 1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.395, + 0.414, + 0.41 + ], + "angle": 0, + "content": "3.2.3 Conclusion and Next Steps" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.413, + 0.827, + 0.496 + ], + "angle": 0, + "content": "In summary, we have introduced a systematic data generation pipeline that ensures the creation of high-quality, structured reasoning data for the Audio-Reasoner model. Our approach involves enriching raw audio data with detailed annotations, generating diverse questions, constructing explicit reasoning chains, and implementing a comprehensive validation framework. The following section provides an in-depth analysis of the final CoTA dataset and its reasoning capabilities, with a detailed statistical overview presented in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.508, + 0.377, + 0.523 + ], + "angle": 0, + "content": "3.3 CoTA Dataset Analysis" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.527, + 0.825, + 0.598 + ], + "angle": 0, + "content": "To evaluate the quality and reasoning efficacy of the CoTA dataset, we analyze its design from two key perspectives: (1) comprehensive audio coverage, ensuring broad representation across real-world and synthetic scenarios, and (2) scalability of reasoning complexity, which aligns task difficulty with structured inference patterns. Together, these aspects address critical gaps in audio-language pretraining." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.604, + 0.827, + 0.689 + ], + "angle": 0, + "content": "Comprehensive Audio Coverage. CoTA integrates three audio domains—speech (38.33%), music (14.12%), and environmental sounds (47.55%)—ensuring diverse and representative coverage of real-world auditory contexts. This multi-domain structure captures a wide spectrum of acoustic phenomena, ranging from conversational speech (e.g., speech-to-text translation tasks in CoVoST 2) to intricate musical structures (MusicBench) and fine-grained environmental sound analysis (e.g., AudioSet's rich descriptions of acoustic environments)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.695, + 0.827, + 0.794 + ], + "angle": 0, + "content": "A distinctive feature of CoTA is its hybrid synthetic-real composition, where synthetic data (Multi-Speaker and Complex Audio, \\(14.15\\%\\) of total samples) is strategically incorporated to enhance complex reasoning tasks, such as multi-step logical inference in Complex Audio. Meanwhile, the majority of the dataset is derived from high-quality real-world sources (e.g., MELD for emotion recognition). By unifying tasks across 10 distinct categories, spanning from fundamental classification to advanced tasks like translation and irony detection, CoTA facilitates a hierarchical learning process—an aspect largely absent in traditional datasets constrained to simple labeling tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.829, + 0.913 + ], + "angle": 0, + "content": "Scalability of Reasoning Complexity. The word count distribution in the CoTA dataset highlights the model's capacity for long-chain reasoning. 
As shown in Figure 4, most responses fall between 300 and 500 words, allowing for nuanced reasoning and detailed logical steps, particularly in audio and music question answering. This extended length supports transparency in reasoning, ensuring a thorough exploration of complex ideas. For more demanding tasks, such as those in the Multi-Speaker dataset, responses can reach up to 1,500 words. This increase reflects the model's ability to systematically break down intricate problems, demonstrating adaptive reasoning in scenarios requiring a deep understanding of multiple interacting elements." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.092, + 0.393, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.092, + 0.601, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.092, + 0.815, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.171, + 0.393, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.171, + 0.602, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.171, + 0.815, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.247, + 0.393, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.247, + 0.602, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.247, + 0.815, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.337, + 0.828, + 0.38 + ], + "angle": 0, + "content": "Figure 4: The bar chart shows the data length distribution across nine CoTA sub-datasets, with intervals of 100 on the horizontal axis (0—1200) and proportions on the vertical axis (totaling \\(100\\%\\)). A moving average trend line is overlaid." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.825, + 0.492 + ], + "angle": 0, + "content": "Conversely, simpler tasks like S2TT elicit more concise responses, typically between 100 and 200 words. This brevity prevents unnecessary elaboration, maintaining clarity and efficiency when a straightforward answer suffices. By adjusting response length based on task complexity, the model showcases its dynamic reasoning capabilities—balancing depth and precision to effectively address a wide range of analytical challenges. Sec. C introduces more evidence about CoTA's scalability of reasoning complexity." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.506, + 0.315, + 0.523 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.53, + 0.353, + 0.545 + ], + "angle": 0, + "content": "4.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.549, + 0.825, + 0.618 + ], + "angle": 0, + "content": "Training Details. Our model, Audio-Reasoner, is built on Qwen2-Audio-Instruct [Chu et al., 2024], comprising a total of 8.4 billion parameters. The training process was conducted using the ms-swift framework [Zhao et al., 2024b], employing supervised fine-tuning with full-parameter tuning. We used a maximum learning rate of 1e-5 and trained the model for a single epoch across the entire CoTA dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.827, + 0.793 + ], + "angle": 0, + "content": "Evaluation Metric. Existing evaluation datasets in the audio domain primarily focus on simple classification tasks or BLEU [Papineni et al., 2002] score-based assessments of model responses. These approaches, however, fail to comprehensively reflect the reasoning capabilities of LALMs. To address this, our evaluation methodology incorporates multiple diverse benchmarks. First, we assessed the model's accuracy on closed-form questions using the MMAU-mini subset [Sakshi et al., 2024]. This subset was specifically chosen since the model's training data does not include multiple-choice questions. Additionally, we evaluated its real-world conversational abilities using the chat and foundation sections of AIR-Bench [Yang et al., 2024b]. These benchmarks encompass various audio modalities, including sound, speech, and music. Beyond these, we also tested the model's performance on traditional audio-related tasks such as speech-to-text translation (S2TT) on CoVoST 2 [Wang et al., 2021] and speech emotion recognition (SER) on the MELD dataset [Poria et al., 2019]. A comprehensive summary of the evaluation tasks and datasets is presented in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Baselines. We primarily select state-of-the-art large audio language models as the baselines for comparison. These include the closed-source models Gemini-1.5-pro [Team et al., 2024], GPT-4o [Hurst et al., 2024], and Qwen-audio-turbo [Chu et al., 2023], as well as the open-source models SALMONN [Tang et al., 2023], Qwen-Audio-Chat [Chu et al., 2023], and Qwen2-Audio-Instruct [Chu et al., 2024], which also serves as the base model. Additionally, we compared cascade approaches such as Whisper [Radford et al., 2023] + GPT-4 [Achiam et al., 2023] and a series of mainstream multimodal large language models [Gong et al., 2023b,a, Kong et al., 2024, Ghosh et al., 2024, Liu et al., 2024b, Su et al., 2023, Wu et al., 2024, Wang et al., 2023, Zhang et al., 2023a]." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.281, + 0.088, + 0.717, + 0.103 + ], + "angle": 0, + "content": "Table 3: Summary of evaluation benchmarks for Audio-Reasoner." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.11, + 0.825, + 0.412 + ], + "angle": 0, + "content": "
Dataset | Split | Task | Metric
MMAU-mini | Sound | Sound QA | ACC
MMAU-mini | Speech | Speech QA | ACC
MMAU-mini | Music | Music QA | ACC
AIR-Bench | Chat-Sound | Sound QA | GPT-4 Eval
AIR-Bench | Chat-Speech | Speech QA | GPT-4 Eval
AIR-Bench | Chat-Music | Music QA | GPT-4 Eval
AIR-Bench | Chat-MixedAudio | Complex Audio QA | GPT-4 Eval
AIR-Bench | Foundation-SoundAQA | Sound QA | ACC
AIR-Bench | Foundation-SER | Speech QA | ACC
AIR-Bench | Foundation-SIC | Speaker Intent Classification | ACC
AIR-Bench | Foundation-SNV | Speaker Number Verification | ACC
AIR-Bench | Foundation-MusicAQA | Music QA | ACC
CoVoST 2 | Test | Speech-to-Text Translation (S2TT) | BLEU
MELD | Test | Speech Emotion Recognition (SER) | ACC
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.438, + 0.308, + 0.451 + ], + "angle": 0, + "content": "4.2 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.828, + 0.528 + ], + "angle": 0, + "content": "To evaluate the effectiveness of Audio-Reasoner, we compare its performance against both closed-source and open-source baselines on benchmark datasets of MMAU-mini and AIR-Bench chat/foundation, CoVoST 2 (zn/en subset) and MELD. The results in Tables 4, 5, 6, 7 and 8 clearly demonstrate that Audio-Reasoner significantly outperforms existing models, setting a new state-of-the-art in audio reasoning tasks." + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.539, + 0.828, + 0.569 + ], + "angle": 0, + "content": "Table 4: Performance comparison on MMAU-mini. The \\(\\{\\mathbf{so},\\mathbf{mu},\\mathbf{sp}\\}\\) indicates whether \"sound\", \"music\", and \"speech\" have been used in training." + }, + { + "type": "table", + "bbox": [ + 0.185, + 0.576, + 0.812, + 0.827 + ], + "angle": 0, + "content": "
Model | Size | {so, mu, sp} | Sound | Music | Speech | Avg
Closed-Source
gpt4o + caption | - | - | 63.36 | 60.77 | 53.15 | 57.30
gemini-1.5-pro | - | - | 56.75 | 49.40 | 58.55 | 54.90
Open-Source
LTU | 7B | Y Y N | 22.52 | 9.69 | 17.71 | 16.89
LTU-AS | 7B | Y Y Y | 23.35 | 9.10 | 20.60 | 17.68
Audio Flamingo - Chat | 2.2B | Y Y N | 23.42 | 15.26 | 11.41 | 16.69
GAMA | 7B | Y Y N | 41.44 | 32.33 | 18.91 | 30.90
GAMA-IT | 7B | Y Y N | 43.24 | 28.44 | 18.91 | 30.20
MU-LLaMA | 7B | N Y N | 40.84 | 32.63 | 22.22 | 31.90
SALMONN | 13B | Y Y Y | 41.00 | 34.80 | 25.50 | 33.70
Qwen-audio-Chat | 8.4B | Y Y Y | 55.25 | 44.00 | 30.03 | 43.10
Qwen2-Audio-Instruct | 8.4B | Y Y Y | 54.95 | 50.98 | 42.04 | 49.20
Ours
Audio-Reasoner | 8.4B | Y Y Y | 60.06 | 64.30 | 60.70 | 61.71
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.83, + 0.914 + ], + "angle": 0, + "content": "Performance on MMAU-mini. MMAU-mini in Table 4 assesses multimodal audio understanding across three major domains: sound, music, and speech. We first make a comparison with closed-source models. Audio-Reasoner achieves the highest overall score (61.71%) outperforming GPT-4o (57.30%) and Gemini-1.5-Pro (54.90%). Music reasoning shows the most significant improvement—Audio-Reasoner: 64.30%, GPT-4o 60.77% and Gemini-1.5-Pro: 49.40%. This indicates superior" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.28, + 0.088, + 0.717, + 0.103 + ], + "angle": 0, + "content": "Table 5: Performance comparison on AIR-Bench chat benchmark." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.109, + 0.776, + 0.364 + ], + "angle": 0, + "content": "
Model | Sound | Music | Speech | Mixed Audio | Average
Closed-Source
Whisper+GPT4 | - | - | 7.54 | - | 7.54
Qwen-Audio-Turbo | 6.59 | 5.98 | 7.04 | 5.77 | 6.34
Gemini-1.5-pro | 5.49 | 5.06 | 6.97 | 5.27 | 5.70
Open-Source
Macaw-LLM | 1.01 | 0.91 | 0.97 | 1.00 | 1.01
SpeechGPT | 0.95 | 0.95 | 1.57 | 1.14 | 1.15
Next-gpt | 4.76 | 4.18 | 3.86 | 2.92 | 4.13
Pandagpt | 5.46 | 5.06 | 3.58 | 2.93 | 4.25
BLSP | 5.55 | 5.08 | 6.17 | 4.52 | 5.33
Qwen-Audio | 6.95 | 5.52 | 6.47 | 5.38 | 6.08
SALMONN | 6.28 | 5.95 | 6.16 | 6.08 | 6.11
Qwen2-Audio-Instruct | 6.99 | 6.79 | 7.18 | 6.77 | 6.93
Ours
Audio-Reasoner | 7.68 | 8.05 | 8.19 | 6.65 | 7.94
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.388, + 0.828, + 0.501 + ], + "angle": 0, + "content": "musical structure comprehension, enabled by CoTA's diverse music-based tasks. Speech-based reasoning is also notably strong—Audio-Reasoner: \\(60.70\\%\\), GPT-4o: \\(53.15\\%\\) and Gemini-1.5-Pro: \\(58.55\\%\\). This validates CoTA's impact in training models for context-dependent spoken language understanding. In comparison with open-source models, Audio-Reasoner surpasses all open-source models, with the next-best, Qwen2-Audio-Instruct, trailing by 12.51 percentage points \\((49.20\\%)\\). Across individual domains, Audio-Reasoner achieves \\(60.06\\%\\) in sound reasoning (beating Qwen2-Audio's \\(54.95\\%\\)), \\(64.30\\%\\) in music (outperforming Qwen2-Audio's \\(50.98\\%\\)), and \\(60.70\\%\\) in speech (exceeding Qwen2-Audio's \\(42.04\\%\\))." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.506, + 0.828, + 0.645 + ], + "angle": 0, + "content": "Performance on AIR-Bench chat. (1) chat benchmark. AIR-Bench chat in Table 5 evaluates contextual and conversational reasoning across four audio types: sound, music, speech, and mixed audio. Regarding closed-source models, Audio-Reasoner achieves the highest overall score (7.94), outperforming Gemini-1.5-Pro (5.70) and Whisper+GPT-4 (7.54). It shows the most significant improvements in music (8.05) and speech (8.19). Additionally, its mixed audio reasoning score (6.65) demonstrates proficiency in handling multi-source audio tasks. As for comparison with open-source models, Audio-Reasoner sets a new benchmark, surpassing Qwen2-Audio (6.93) by 1.01 points. Across domains, it achieves 7.68 in sound (beating Qwen2-Audio's 6.99), 8.05 in music (exceeding Qwen2-Audio's 6.79), and 8.19 in speech (outperforming Qwen-2-Audio's 7.18), showcasing balanced expertise." + }, + { + "type": "table_caption", + "bbox": [ + 0.258, + 0.657, + 0.739, + 0.673 + ], + "angle": 0, + "content": "Table 6: Performance comparison on AIR-Bench foundation benchmark." + }, + { + "type": "table", + "bbox": [ + 0.183, + 0.678, + 0.812, + 0.911 + ], + "angle": 0, + "content": "
Model | AIR-Sound (SoundAQA) | AIR-Music (MusicAQA) | AIR-Speech (SER) | AIR-Speech (SIC) | AIR-Speech (SNV) | Average
Closed-Source
whisper+GPT4 | - | - | 59.5 | 87.7 | 30.0 | 59.1
Qwen-Audio-Turbo | 62.8 | 62.5 | 60.0 | 56.4 | 54.3 | 59.2
Open-Source
NEXT-GPT | 18.8 | 47.1 | 25.7 | 25.6 | 25.4 | 28.5
SpeechGPT | 33.9 | 31.3 | 37.6 | 45.8 | 32.6 | 36.2
BLSP | 36.1 | 31.0 | 27.4 | 46.6 | 28.1 | 33.8
PandaGPT | 48.7 | 50.7 | 26.0 | 28.5 | 43.2 | 39.4
SALMONN | 28.4 | 54.6 | 29.9 | 36.7 | 34.3 | 36.8
Qwen-Audio-Chat | 64.6 | 48.2 | 43.2 | 77.8 | 35.3 | 53.8
Ours
Audio-Reasoner | 65.7 | 55.2 | 60.5 | 88.1 | 56.3 | 65.2
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.828, + 0.3 + ], + "angle": 0, + "content": "(2) foundation benchmark. AIR-Bench foundation in Table 6 evaluates fundamental audio understanding across three primary categories: sound, music, and speech, with speech further divided into three subdomains: Speech Emotion Recognition (SER), Speaker Identification and Classification (SIC), and Speech Number Variation (SNV). Audio-Reasoner achieves the highest overall score (65.2), outperforming both closed-source and open-source baselines. Compared to the strongest closed-source model, Qwen-Audio-Turbo (59.2), Audio-Reasoner leads by 6.0 points, demonstrating superior reasoning across all audio domains. With the sound category, Audio-Reasoner attains 65.7, surpassing Qwen-Audio-Chat (64.6) and Qwen-Audio-Turbo (62.8), highlighting its strong ability in environmental and non-speech audio understanding. For music reasoning, Audio-Reasoner achieves 55.2, significantly outperforming Qwen-Audio-Turbo (48.2) and all open-source baselines, indicating better comprehension of musical structures and attributes. Regarding speech reasoning, Audio-Reasoner sets new state-of-the-art results across all subdomains. It attains 60.5 in SER (v.s. Qwen-Audio-Turbo's 60.0), 88.1 in SIC (surpassing Whisper+GPT-4's 87.7), and 56.3 in SNV (exceeding Qwen-Audio-Turbo's 54.3). The substantial lead in SIC showcases its exceptional speaker recognition capability, benefiting from CoTA's step-by-step reasoning process." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.311, + 0.828, + 0.34 + ], + "angle": 0, + "content": "Table 7: Performance comparison of the speech-to-text translation (S2TT) task on CoVoST 2 dataset. We consider the mutual conversion between Chinese and English as training and evaluation data." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.347, + 0.821, + 0.473 + ], + "angle": 0, + "content": "
Model | EN-ZN BLEU1 | EN-ZN BLEU2 | EN-ZN BLEU3 | EN-ZN BLEU4 | EN-ZN Avg | ZN-EN BLEU1 | ZN-EN BLEU2 | ZN-EN BLEU3 | ZN-EN BLEU4 | ZN-EN Avg | Avg
Closed-Source
Gemini-1.5-pro | 68.25 | 49.12 | 37.81 | 29.79 | 46.24 | 51.83 | 26.61 | 16.27 | 10.88 | 26.39 | 36.32
Open-Source
Qwen2-Audio-Instruct | 58.63 | 39.55 | 28.71 | 21.40 | 37.07 | 48.52 | 24.31 | 14.65 | 9.24 | 24.18 | 30.63
Ours
Audio-Reasoner | 72.89 | 54.17 | 42.46 | 33.95 | 50.87 | 56.50 | 29.99 | 18.37 | 11.62 | 29.13 | 40.00
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.486, + 0.827, + 0.543 + ], + "angle": 0, + "content": "Performance on CoVoST 2. The CoVoST 2 dataset in Table 7 evaluates speech-to-text translation, a fundamental task in cross-lingual speech understanding. Audio-Reasoner demonstrates the strengths of Audio-Reasoner in speech-to-text translation across both English-to-Chinese (EN-ZN) and Chinese-to-English (ZN-EN) tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.548, + 0.828, + 0.674 + ], + "angle": 0, + "content": "For EN-ZN translation, Audio-Reasoner outperforms both closed-source Gemini-1.5-pro and open-source Qwen2-Audio-Instruct. With an average BLEU score of 50.87, it surpasses Gemini-1.5-pro's score of 46.24 by 4.63 points and Qwen2-Audio-Instruct's 37.07 by a significant 13.80 points. Audio-Reasoner's BLEU-4 score of 33.95 highlights its ability to generate fluent, high-quality translations, even for more complex sentence structures. In the ZN-EN translation task, Audio-Reasoner continues to show superiority with an average BLEU score of 29.13, outperforming Gemini-1.5-pro (with a score of 26.39) by 2.74 points and Qwen2-Audio-Instruct (with a score of 24.18) by 4.95 points. Its BLEU-4 score of 11.62 reflects an enhanced ability to produce coherent and accurate translations, particularly in more challenging, longer sentences." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.679, + 0.825, + 0.709 + ], + "angle": 0, + "content": "These results demonstrate that Audio-Reasoner excels in both capturing cross-lingual semantic alignment, consistently outperforming existing models in speech-to-text translation tasks." + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.719, + 0.822, + 0.735 + ], + "angle": 0, + "content": "Table 8: Performance comparison of the speech emotion recognition (SER) task on MELD dataset." + }, + { + "type": "table", + "bbox": [ + 0.341, + 0.74, + 0.653, + 0.815 + ], + "angle": 0, + "content": "
Model | Unweighted_ACC
EMO-box | 31.5
SALMONN | 39.2
Qwen2-Audio-Instruct | 49.9
Audio-Reasoner | 53.9
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.828, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Performance on MELD. The MELD dataset in Table 8 evaluates speech emotion recognition, a crucial task for understanding affective states in spoken language. Audio-Reasoner achieves the highest unweighted accuracy of 53.9, outperforming all baseline models, including the previous state-of-the-art Qwen2-Audio-Instruct (49.9) by 4.0 percentage points. The results further validate that Audio-Reasoner excels in emotion perception and contextual audio understanding, solidifying its effectiveness in real-world speech emotion recognition tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.191, + 0.103, + 0.216, + 0.127 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.106, + 0.761, + 0.127 + ], + "angle": 0, + "content": "Analyze the interplay between the guitar riff and the spoken commentary. How do they contribute to the overall impression or potential purpose of the piece?" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.13, + 0.332, + 0.144 + ], + "angle": 0, + "content": "1" + }, + { + "type": "image", + "bbox": [ + 0.78, + 0.15, + 0.808, + 0.181 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.157, + 0.246, + 0.166 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.167, + 0.761, + 0.19 + ], + "angle": 0, + "content": " The question asks about interplay and contribution. Analyze the music clip, focusing on: Characterizing the guitar riff: style, rhythm, melodic features. Characterizing the spoken commentary: tone, content, frequency. Analyzing their relationship: explain, praise, interrupt. Inferring the overall impression and purpose: instruction, demonstration, etc.? " + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.196, + 0.765, + 0.214 + ], + "angle": 0, + "content": " The audio has a blues - rock guitar riff on a clean - toned electric guitar, moderate tempo, 4/4 time. There's male spoken commentary, informal and relaxed, suggesting instruction or informal performance." + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.219, + 0.354, + 0.228 + ], + "angle": 0, + "content": " Consider scenarios:" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.228, + 0.346, + 0.235 + ], + "angle": 0, + "content": "**Scenario 1: Instructional Context" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.235, + 0.754, + 0.251 + ], + "angle": 0, + "content": "If commentary explains riff aspects (e.g., \"bend on third string\", \"E minor blues progression\"), it's instructional. Guitar demonstrates, commentary clarifies. If it gives positive reinforcement (\"very cool riff, man\"), it's a casual lesson or performance with teaching." + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.252, + 0.407, + 0.261 + ], + "angle": 0, + "content": "**Scenario 2: Informal Performance/Demonstration" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.261, + 0.763, + 0.277 + ], + "angle": 0, + "content": "If commentary is minimal (reactions like \"yeah\", \"alright\"), it's an informal performance. Riffs simplicity and repetition suggest a demonstration for less - experienced musicians." + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.279, + 0.767, + 0.296 + ], + "angle": 0, + "content": "The audio leans towards scenario 1. 
\"very cool riff, man\" implies positive assessment for teaching or demonstration. Clean tone and moderate pace suggest instruction/demonstration, not a raw blues performance." + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.296, + 0.693, + 0.304 + ], + "angle": 0, + "content": "Refining: E minor blues with E - B chord progression and C# implies commentary helps demonstrate their use. " + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.312, + 0.756, + 0.337 + ], + "angle": 0, + "content": " The blues - rock, moderate - tempo guitar riff is complemented by relaxed, informal spoken commentary. The interplay indicates a purpose of instruction or demonstration of the riff's construction. The positive comment supports an informal teaching or relaxed demonstration." + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.338, + 0.249, + 0.346 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.204, + 0.348, + 0.27, + 0.358 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.358, + 0.765, + 0.383 + ], + "angle": 0, + "content": "The interplay creates an impression of informal instruction or demonstration. The clean - toned guitar plays a moderate - tempo blues - rock riff, and the male voice gives relaxed commentary like \"very cool riff, man\". The riff's simplicity, moderate pace, and E minor blues elements suggest the purpose is to demonstrate the riff and how to play its features, not a formal performance or raw blues." + }, + { + "type": "text", + "bbox": [ + 0.204, + 0.384, + 0.274, + 0.393 + ], + "angle": 0, + "content": "" + }, + { + "type": "image_caption", + "bbox": [ + 0.26, + 0.425, + 0.735, + 0.44 + ], + "angle": 0, + "content": "Figure 5: Example of Audio-Reasoner answering music-based question." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.828, + 0.673 + ], + "angle": 0, + "content": "Why Audio-Reasoner Excels. While open-source models typically lag behind closed-source alternatives, Audio-Reasoner narrows this gap significantly. It outperforms Gemini-1.5-Pro in multiple categories and achieves competitive results against GPT-4o. A key factor driving Audio-Reasoner's state-of-the-art performance is the CoTA dataset, which provides an unparalleled foundation for comprehensive, structured, and context-aware audio reasoning. The dataset contributes to Audio-Reasoner's success in two critical ways: (1) CoTA's comprehensive audio reasoning data. Unlike models trained on fragmented or task-specific datasets, Audio-Reasoner benefits from CoTA's extensive and balanced coverage of sound, speech, and music-based reasoning. This ensures robust adaptation across diverse audio tasks, enhancing its ability to generalize across multimodal benchmarks. (2) CoTA's strong logical and contextual inference. CoTA's emphasis on structured reasoning and contextual awareness enables Audio-Reasoner to outperform existing models in long-form reasoning (MMAU-mini) and conversational audio understanding (AIR-Bench chat). By leveraging deep logical inference and nuanced context retention, it surpasses prior approaches in handling complex speech interactions and multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.691, + 0.825, + 0.734 + ], + "angle": 0, + "content": "These strengths position Audio-Reasoner as a breakthrough in open-source audio intelligence, setting a new benchmark for multimodal reasoning in speech comprehension, translation, and real-world audio interpretation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.757, + 0.292, + 0.772 + ], + "angle": 0, + "content": "4.3 Case Study" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.781, + 0.828, + 0.88 + ], + "angle": 0, + "content": "Here we show a case in Figure 5, demonstrating the audio-based reasoning capability of Audio-Reasoner. The system analyzes the interplay between a guitar riff and a spoken commentary, systematically breaking down their characteristics and relationship. It identifies key musical features, assesses the commentary's tone and intent, and infers the overall purpose of the piece. By considering different scenarios, Audio-Reasoner determines that the interplay suggests an informal instructional or demonstrative context rather than a formal performance. This example highlights the model's ability to extract meaningful insights from audio, combining musical analysis with contextual interpretation." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.898, + 0.53, + 0.913 + ], + "angle": 0, + "content": "For more remarkable examples, please refer to Sec. D." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.303, + 0.107 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.114, + 0.827, + 0.199 + ], + "angle": 0, + "content": "In this work, we introduced Audio-Reasoner, a large audio language model (LALM) designed to advance deep reasoning in audio-based tasks. By leveraging inference scaling and structured chain-of-thought (CoT) reasoning, we demonstrated significant performance improvements across key benchmarks. Central to our approach is CoTA, a large-scale, high-quality dataset containing around 1.2 million structured reasoning samples, which we generated through a systematic pipeline of annotation refinement, question synthesis, and CoT generation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.204, + 0.828, + 0.275 + ], + "angle": 0, + "content": "Our experimental results highlight the effectiveness of structured reasoning in the audio domain, achieving state-of-the-art performance on MMAU-mini \\((+25.04\\%)\\), CoVoST 2 \\((+8.31\\%)\\), and MELD \\((+8.01\\%)\\). These findings underscore the critical role of reasoning-rich datasets and inference scaling in multimodal learning, particularly for audio-based tasks where existing models struggle with complex reasoning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.28, + 0.828, + 0.364 + ], + "angle": 0, + "content": "Moving forward, we believe that structured CoT methodologies will play a crucial role in enhancing the reasoning capabilities of LALMs. Future research directions include extending Audio-Reasoner's capabilities to multi-turn dialogue reasoning, cross-modal knowledge integration, and real-world applications in speech-driven AI systems. By publicly releasing our model, dataset, and code, we aim to foster further innovation and collaboration in the field of audio reasoning, paving the way for more advanced and interpretable LALMs." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.377, + 0.27, + 0.392 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.4, + 0.829, + 0.442 + ], + "angle": 0, + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 
Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.452, + 0.826, + 0.508 + ], + "angle": 0, + "content": "Cameron B Browne, Edward Powley, Daniel Whitehouse, Simon M Lucas, Peter I Cowling, Philipp Rohlfshagen, Stephen Tavener, Diego Perez, Spyridon Samothrakis, and Simon Colton. A survey of monte carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in Games (T-CIAIG), (1):1-43, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.518, + 0.826, + 0.561 + ], + "angle": 0, + "content": "Santiago Castro, Devamanyu Hazarika, Verónica Pérez-Rosas, Roger Zimmermann, Rada Mihalcea, and Soujanya Poria. Towards multimodal sarcasm detection. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 4619-4629, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.57, + 0.825, + 0.613 + ], + "angle": 0, + "content": "Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.622, + 0.827, + 0.664 + ], + "angle": 0, + "content": "Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, et al. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.675, + 0.827, + 0.717 + ], + "angle": 0, + "content": "Yingqian Cui, Pengfei He, Xianfeng Tang, Qi He, Chen Luo, Jiliang Tang, and Yue Xing. A theoretical understanding of chain-of-thought: Coherent reasoning and error-aware demonstration. arXiv preprint arXiv:2410.16540, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.727, + 0.827, + 0.77 + ], + "angle": 0, + "content": "Alexandre Defossez, Laurent Mazare, Manu Orsini, Amelie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.779, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.818, + 0.827, + 0.859 + ], + "angle": 0, + "content": "Konstantinos Drossos, Samuel Lipping, and Tuomas Virtanen. Clotho: An audio captioning dataset. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 736-740, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. arXiv preprint arXiv:2412.10117, 2024." 
+
  },
  {
   "type": "list",
   "bbox": [
    0.174,
    0.4,
    0.829,
    0.913
   ],
   "angle": 0,
   "content": null
  },
  {
   "type": "page_number",
   "bbox": [
    0.491,
    0.936,
    0.509,
    0.948
   ],
   "angle": 0,
   "content": "13"
  }
 ],
 [
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.091,
    0.826,
    0.134
   ],
   "angle": 0,
   "content": "Logan Engstrom, Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Firdaus Janoos, Larry Rudolph, and Aleksander Madry. Implementation matters in deep policy gradients: A case study on ppo and trpo. arXiv preprint arXiv:2005.12729, 2020."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.142,
    0.826,
    0.187
   ],
   "angle": 0,
   "content": "Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong Li Lee, and Wynne Hsu. Video-of-thought: step-by-step video reasoning from perception to cognition. In International Conference on Machine Learning (ICML), pages 13109-13125, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.195,
    0.824,
    0.239
   ],
   "angle": 0,
   "content": "Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957, 2025."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.246,
    0.827,
    0.303
   ],
   "angle": 0,
   "content": "Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.173,
    0.312,
    0.827,
    0.37
   ],
   "angle": 0,
   "content": "Sreyan Ghosh, Sonal Kumar, Ashish Seth, Chandra Kiran Reddy Evuru, Utkarsh Tyagi, S Sakshi, Oriol Nieto, Ramani Duraiswami, and Dinesh Manocha. Gama: A large audio-language model with advanced audio understanding and complex reasoning abilities. In Empirical Methods in Natural Language Processing, pages 6288-6313, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.377,
    0.827,
    0.422
   ],
   "angle": 0,
   "content": "Yuan Gong, Alexander H Liu, Hongyin Luo, Leonid Karlinsky, and James Glass. Joint audio and speech understanding. In Automatic Speech Recognition and Understanding Workshop (ASRU), pages 1-8, 2023a."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.43,
    0.824,
    0.459
   ],
   "angle": 0,
   "content": "Yuan Gong, Hongyin Luo, Alexander H Liu, Leonid Karlinsky, and James Glass. Listen, think, and understand. arXiv preprint arXiv:2305.10790, 2023b."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.468,
    0.826,
    0.511
   ],
   "angle": 0,
   "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.52,
    0.826,
    0.564
   ],
   "angle": 0,
   "content": "Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. Mammoth-vl: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237, 2024."
+
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.572,
    0.826,
    0.615
   ],
   "angle": 0,
   "content": "Songhao Han, Wei Huang, Hairong Shi, Le Zhuo, Xiu Su, Shifeng Zhang, Xu Zhou, Xiaojuan Qi, Yue Liao, and Si Liu. Videoespresso: A large-scale chain-of-thought dataset for fine-grained video reasoning via core frame selection. arXiv preprint arXiv:2411.14794, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.623,
    0.826,
    0.667
   ],
   "angle": 0,
   "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.676,
    0.827,
    0.718
   ],
   "angle": 0,
   "content": "Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.727,
    0.826,
    0.77
   ],
   "angle": 0,
   "content": "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.779,
    0.824,
    0.81
   ],
   "angle": 0,
   "content": "Feihu Jin, Yifan Liu, and Ying Tan. Zero-shot chain-of-thought reasoning guided by evolutionary algorithms in large language models. arXiv preprint arXiv:2402.05376, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.818,
    0.824,
    0.861
   ],
   "angle": 0,
   "content": "Chris Dongjoo Kim, Byeongchang Kim, Hyunmin Lee, and Gunhee Kim. Audiocaps: Generating captions for audios in the wild. In Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL), pages 119-132, 2019."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.175,
    0.87,
    0.824,
    0.913
   ],
   "angle": 0,
   "content": "Zhifeng Kong, Arushi Goel, Rohan Badlani, Wei Ping, Rafael Valle, and Bryan Catanzaro. Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities. In International Conference on Machine Learning (ICML), pages 25125-25148, 2024."
  },
  {
   "type": "list",
   "bbox": [
    0.173,
    0.091,
    0.827,
    0.913
   ],
   "angle": 0,
   "content": null
  },
  {
   "type": "page_number",
   "bbox": [
    0.491,
    0.936,
    0.509,
    0.948
   ],
   "angle": 0,
   "content": "14"
  }
 ],
 [
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.091,
    0.826,
    0.134
   ],
   "angle": 0,
   "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.145,
    0.826,
    0.189
   ],
   "angle": 0,
   "content": "Shansong Liu, Atin Sakkeer Hussain, Chenshuo Sun, and Ying Shan. Music understanding llama: Advancing text-to-music generation with question answering and captioning. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 286-290, 2024b."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.199,
    0.826,
    0.242
   ],
   "angle": 0,
   "content": "Ziyang Ma, Mingjie Chen, Hezhao Zhang, Zhisheng Zheng, Wenxi Chen, Xiquan Li, Jiaxin Ye, Xie Chen, and Thomas Hain. 
Emobox: Multilingual multi-corpus speech emotion recognition toolkit and benchmark. arXiv preprint arXiv:2406.07162, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.252, + 0.826, + 0.295 + ], + "angle": 0, + "content": "Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.305, + 0.826, + 0.361 + ], + "angle": 0, + "content": "Jan Melechovsky, Zixun Guo, Deepanway Ghosal, Navonil Majumder, Dorien Herremans, and Soujanya Poria. Mustango: Toward controllable text-to-music generation. In *Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL)*, pages 8286-8309, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.373, + 0.826, + 0.416 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.426, + 0.826, + 0.47 + ], + "angle": 0, + "content": "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5206-5210, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.48, + 0.826, + 0.524 + ], + "angle": 0, + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 311-318, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.533, + 0.826, + 0.578 + ], + "angle": 0, + "content": "Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. Meld: A multimodal multi-party dataset for emotion recognition in conversations. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 527-536, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.587, + 0.826, + 0.631 + ], + "angle": 0, + "content": "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, et al. Toolllm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.641, + 0.826, + 0.684 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International Conference on Machine Learning (ICML), pages 28492-28518, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.693, + 0.826, + 0.738 + ], + "angle": 0, + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Advances in Neural Information Processing Systems (NeurIPS), pages 53728-53741, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.748, + 0.826, + 0.805 + ], + "angle": 0, + "content": "S Sakshi, Utkarsh Tyagi, Sonal Kumar, Ashish Seth, Ramaneswaran Selvakumar, Oriol Nieto, Ramani Duraiswami, Sreyan Ghosh, and Dinesh Manocha. 
Mmau: A massive multi-task audio understanding and reasoning benchmark. In International Conference on Learning Representations (ICLR), 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.815,
    0.826,
    0.859
   ],
   "angle": 0,
   "content": "Hao Shao, Shengju Qian, Han Xiao, Guanglu Song, Zhuofan Zong, Letian Wang, Yu Liu, and Hongsheng Li. Visual cot: Unleashing chain-of-thought reasoning in multi-modal language models. arXiv preprint arXiv:2403.16999, 2024a."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.869,
    0.826,
    0.913
   ],
   "angle": 0,
   "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024b."
  },
  {
   "type": "list",
   "bbox": [
    0.174,
    0.091,
    0.826,
    0.913
   ],
   "angle": 0,
   "content": null
  },
  {
   "type": "page_number",
   "bbox": [
    0.491,
    0.936,
    0.508,
    0.948
   ],
   "angle": 0,
   "content": "15"
  }
 ],
 [
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.091,
    0.828,
    0.147
   ],
   "angle": 0,
   "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.157,
    0.827,
    0.2
   ],
   "angle": 0,
   "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In Advances in Neural Information Processing Systems (NeurIPS), pages 29106-29141, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.207,
    0.825,
    0.251
   ],
   "angle": 0,
   "content": "Yixuan Su, Tian Lan, Huayang Li, Jialu Xu, Yan Wang, and Deng Cai. Pandagpt: One model to instruction-follow them all. In Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants (TLLM), pages 11-23, 2023."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.259,
    0.825,
    0.301
   ],
   "angle": 0,
   "content": "Changli Tang, Wenyi Yu, Guangzhi Sun, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, and Chao Zhang. Salmonn: Towards generic hearing abilities for large language models. arXiv preprint arXiv:2310.13289, 2023."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.31,
    0.827,
    0.353
   ],
   "angle": 0,
   "content": "Yunlong Tang, Gen Zhan, Li Yang, Yiting Liao, and Chenliang Xu. Cardiff: Video salient object ranking chain of thought reasoning for saliency prediction with diffusion. arXiv preprint arXiv:2408.12009, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.361,
    0.825,
    0.405
   ],
   "angle": 0,
   "content": "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.413,
    0.825,
    0.456
   ],
   "angle": 0,
   "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.464, + 0.825, + 0.507 + ], + "angle": 0, + "content": "Miles Turpin, Julian Michael, Ethan Perez, and Samuel Bowman. Language models don't always say what they think: Unfaithful explanations in chain-of-thought prompting. In Advances in Neural Information Processing Systems (NeurIPS), pages 74952-74965, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.515, + 0.827, + 0.558 + ], + "angle": 0, + "content": "Changhan Wang, Anne Wu, Jiatao Gu, and Juan Pino. Covost 2 and massively multilingual speech translation. In Conference of the International Speech Communication Association (Interspeech), pages 2247-2251, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.566, + 0.825, + 0.609 + ], + "angle": 0, + "content": "Chen Wang, Minpeng Liao, Zhongqiang Huang, Jinliang Lu, Junhong Wu, Yuchen Liu, Chengqing Zong, and Jiajun Zhang. Blsp: Bootstrapping language-speech pre-training via behavior alignment of continuation writing. arXiv preprint arXiv:2309.00916, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.618, + 0.825, + 0.66 + ], + "angle": 0, + "content": "Yan Wang, Yawen Zeng, Jingsheng Zheng, Xiaofen Xing, Jin Xu, and Xiangmin Xu. Videocot: A video chain-of-thought dataset with active annotation tool. In Workshop on Advances in Language and Vision Research (ALVR), pages 92-101, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.668, + 0.825, + 0.712 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.72, + 0.827, + 0.763 + ], + "angle": 0, + "content": "Peng Wen, Teng-Gen Hu, Robert J Linhardt, Sen-Tai Liao, Hong Wu, and Yu-Xiao Zou. Mulberry: A review of bioactive compounds and advanced processing technology. Trends in food science & technology, 83:138-158, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.771, + 0.825, + 0.801 + ], + "angle": 0, + "content": "Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. In International Conference on Machine Learning (ICML), pages 53366-53397, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.809, + 0.825, + 0.838 + ], + "angle": 0, + "content": "Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.846, + 0.825, + 0.876 + ], + "angle": 0, + "content": "Zhifei Xie and Changqiao Wu. Mini-omni2: Towards open-source gpt-4o with vision, speech and duplex capabilities. arXiv preprint arXiv:2410.11190, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.135 + ], + "angle": 0, + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Qian Yang, Jin Xu, Wenrui Liu, Yunfei Chu, Ziyue Jiang, Xiaohuan Zhou, Yichong Leng, Yuanjun Lv, Zhou Zhao, Chang Zhou, et al. Air-bench: Benchmarking large audio-language models via generative comprehension. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 1979-1998, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.207, + 0.827, + 0.252 + ], + "angle": 0, + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 11809-11822, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.259, + 0.825, + 0.302 + ], + "angle": 0, + "content": "Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. Token-level direct preference optimization. In International Conference on Machine Learning (ICML), pages 58348-58365, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.31, + 0.827, + 0.354 + ], + "angle": 0, + "content": "Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. arXiv preprint arXiv:2305.11000, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.362, + 0.827, + 0.405 + ], + "angle": 0, + "content": "Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. In Empirical Methods in Natural Language Processing (EMNLP), pages 15757-15773, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.413, + 0.827, + 0.457 + ], + "angle": 0, + "content": "Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.464, + 0.827, + 0.495 + ], + "angle": 0, + "content": "Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.502, + 0.825, + 0.532 + ], + "angle": 0, + "content": "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola. Automatic chain of thought prompting in large language models. arXiv preprint arXiv:2210.03493, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.54, + 0.827, + 0.583 + ], + "angle": 0, + "content": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. 
Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024a."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.591,
    0.827,
    0.635
   ],
   "angle": 0,
   "content": "Yuze Zhao, Jintao Huang, Jinghan Hu, Xingjun Wang, Yunlin Mao, Daoze Zhang, Zeyinzi Jiang, Zhikai Wu, Baole Ai, Ang Wang, et al. Swift: a scalable lightweight infrastructure for fine-tuning. arXiv preprint arXiv:2408.05517, 2024b."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.643,
    0.825,
    0.685
   ],
   "angle": 0,
   "content": "Qiji Zhou, Ruochen Zhou, Zike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. arXiv preprint arXiv:2405.13872, 2024."
  },
  {
   "type": "ref_text",
   "bbox": [
    0.174,
    0.693,
    0.825,
    0.724
   ],
   "angle": 0,
   "content": "Anni Zou, Zhuosheng Zhang, Hai Zhao, and Xiangru Tang. Generalizable chain-of-thought prompting in mixed-task scenarios with large language models. arXiv preprint arXiv:2310.06692, 2023."
  },
  {
   "type": "list",
   "bbox": [
    0.174,
    0.091,
    0.828,
    0.724
   ],
   "angle": 0,
   "content": null
  },
  {
   "type": "page_number",
   "bbox": [
    0.491,
    0.936,
    0.509,
    0.948
   ],
   "angle": 0,
   "content": "17"
  }
 ],
 [
  {
   "type": "title",
   "bbox": [
    0.173,
    0.09,
    0.339,
    0.108
   ],
   "angle": 0,
   "content": "A Prompt Details"
  },
  {
   "type": "text",
   "bbox": [
    0.171,
    0.114,
    0.828,
    0.323
   ],
   "angle": 0,
   "content": "A universally applicable method for writing prompts involves three key components: a clear task definition, a structured example, and a precise format specification. Our prompt adheres to this methodology by first defining the task explicitly, outlining the need for detailed audio descriptions and progressively challenging questions. It then provides a structured example that demonstrates the expected output format, ensuring clarity and minimizing ambiguity. Lastly, it specifies the exact formatting rules using delimiters such as ... and ..., ensuring consistency in responses. This approach guarantees efficiency by eliminating interpretative variance, allowing for precise and reproducible outputs. When drafting this prompt, we adhered to a structured approach to maximize clarity and effectiveness. The first-person perspective is used to emphasize our direct involvement in designing the task, ensuring the reader understands the rationale behind each structural choice. The structure follows a logical progression: we begin by introducing the general method, transition into an explanation of how our prompt aligns with this method, and conclude by justifying the approach's efficiency. By maintaining an academic tone, we reinforce the credibility and rigor of our prompt-writing methodology. We list some of the prompts used for tasks at different stages below."
  },
  {
   "type": "title",
   "bbox": [
    0.173,
    0.331,
    0.673,
    0.347
   ],
   "angle": 0,
   "content": "A.1 Prompt of Stage 1 when Processing Data (Sample from AudioSet)"
  },
  {
   "type": "text",
   "bbox": [
    0.195,
    0.36,
    0.803,
    0.473
   ],
   "angle": 0,
   "content": "We are annotating some audio and designing some questions. You are an excellent audio analyst. Next, you will receive an audio and one absolutely correct but simple description. Your task is to first generate a more detailed, in-depth and absolutely correct new description based on the given descriptions. Then, use this description to generate three open-ended or single-choice questions with four options along with their answers. Please separate different parts using ...\n...\n...\n\n...\n...."
  },
  {
   "type": "text",
   "bbox": [
    0.195,
    0.484,
    0.803,
    0.584
   ],
   "angle": 0,
   "content": "Here is a sample. Please strictly follow the format in the sample. The audio presents a sustained, high-frequency static noise, characteristic of a detuned or malfunctioning electronic device, likely a television or radio...Describe the characteristics of the static noise in the audio, and how these characteristics change over time. What...? "
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.594,
    0.543,
    0.608
   ],
   "angle": 0,
   "content": "Here is the original description: *** label here ***."
  },
  {
   "type": "text",
   "bbox": [
    0.199,
    0.609,
    0.317,
    0.622
   ],
   "angle": 0,
   "content": "Here is the audio."
  },
  {
   "type": "text",
   "bbox": [
    0.199,
    0.623,
    0.802,
    0.678
   ],
   "angle": 0,
   "content": "Please design three questions that gradually become more challenging, starting from basic factual questions, but don't deviate from the content of the audio itself. If it's a single-choice question, please give four options like (a) one, (b) two, .... and the answer should be analyzed and end with a format like \"the answer is B.\""
  },
  {
   "type": "page_number",
   "bbox": [
    0.491,
    0.936,
    0.509,
    0.948
   ],
   "angle": 0,
   "content": "18"
  }
 ],
 [
  {
   "type": "title",
   "bbox": [
    0.173,
    0.092,
    0.672,
    0.108
   ],
   "angle": 0,
   "content": "A.2 Prompt of Stage 2 when Processing Data (Sample from AudioSet)"
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.121,
    0.8,
    0.344
   ],
   "angle": 0,
   "content": "We are now designing a system to generate structured audio-based chain-of-thought reasoning data. You will receive an audio clip, its textual description, as well as a question and its answer. Your task is to explore in more detail the thinking process from the question to the answer. Your chain of thought will be divided into two main parts, the think and response sections. In the think section, you need to go through four steps: planning, captioning, reasoning, and summarizing. The think section is invisible to the user. Therefore, in the response section, you need to build on all the reasoning processes and results in the think section and provide a final reply based on the question. Finally, your reply should strictly follow the following format: planning (in this part, analyze the user's needs and how to complete the task; if the problem is complex, it should be done step by step), captioning (in this part, conduct an overall analysis of the given audio input content, try to find all the parts related to the question, describe them, and ensure it is completely correct), reasoning (start reasoning towards the final answer here), summarizing (draw appropriate results based on the reasoning part here), response (give the final answer here, referring to the think part). Please strictly follow the format of the sample."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.356,
    0.258,
    0.369
   ],
   "angle": 0,
   "content": "Sample:"
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.37,
    0.27,
    0.383
   ],
   "angle": 0,
   "content": ""
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.384,
    0.303,
    0.396
   ],
   "angle": 0,
   "content": ""
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.397,
    0.761,
    0.411
   ],
   "angle": 0,
   "content": "The user wants to understand the dynamic changes within the provided audio clip ...."
+ }, + { + "type": "text", + "bbox": [ + 0.198, + 0.412, + 0.307, + 0.424 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.425, + 0.29, + 0.437 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.438, + 0.798, + 0.466 + ], + "angle": 0, + "content": "The audio clip predominantly features static noise. .... similar to that of a detuned television or a device failing to receive a signal." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.467, + 0.295, + 0.479 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.48, + 0.313, + 0.492 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.494, + 0.798, + 0.548 + ], + "angle": 0, + "content": "1. Identify changes in Intensity (Volume): The audio's static noise does not remain at a constant volume. There are noticeable increases and decreases in loudness throughout the clip. .... is struggling to maintain a consistent output, adding to the impression of something malfunctioning or broken." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.549, + 0.318, + 0.561 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.563, + 0.303, + 0.575 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.576, + 0.798, + 0.604 + ], + "angle": 0, + "content": "The static noise in the audio is highly dynamic. .... leading to a sense of disorder and instability." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.605, + 0.307, + 0.617 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.618, + 0.275, + 0.63 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.632, + 0.301, + 0.644 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.645, + 0.798, + 0.673 + ], + "angle": 0, + "content": "The audio presents a static noise, .... The overall effect is one of energetic chaos, preventing any possibility of calm or predictability." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.674, + 0.305, + 0.685 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.7, + 0.8, + 0.796 + ], + "angle": 0, + "content": "Note that you have both the question and the answer because it is necessary to ensure the correctness of the chain of thought. However, in your response, you can only refer to the content of the question and the audio, and lead to the answer. You must absolutely not assume that you already know the answer. Please provide a detailed and flexible response with high-quality logic in both the caption and reasoning sections. If the reasoning part requires complex logic, you can even propose several different approaches and try them one by one. Here is the original description: *** caption here ***." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.797, + 0.464, + 0.81 + ], + "angle": 0, + "content": "The question is: *** question here ***." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.811, + 0.542, + 0.824 + ], + "angle": 0, + "content": "The answer you can refer to : *** answer here ***." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.825, + 0.8, + 0.866 + ], + "angle": 0, + "content": "Again, don't mention that you have the answer and the description because they are only here to help you to design the chain of thought but should not exist in the real-world scenario, either in the think or response sections." 
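Editor's note: the stage-2 prompt above is the core of the CoTA generation pipeline, so a concrete rendering may help. Below is a minimal sketch, not the authors' released code: the tag names THINK/RESPONSE, the helper names, and the abbreviated template are all assumptions for illustration, since the extracted prompt text lost its original delimiters.

import re

# Hypothetical template; "[...]" stands for the full stage-2 instructions shown above.
STAGE2_TEMPLATE = (
    "We are now designing a system to generate structured audio-based "
    "chain-of-thought reasoning data. [...]\n"
    "Here is the original description: {caption}\n"
    "The question is: {question}\n"
    "The answer you can refer to : {answer}"
)

def build_stage2_prompt(caption: str, question: str, answer: str) -> str:
    """Fill the stage-2 template with one annotated sample."""
    return STAGE2_TEMPLATE.format(caption=caption, question=question, answer=answer)

# Assumed reply layout: a think block holding the four steps, then the user-facing reply.
COT_RE = re.compile(r"<THINK>(.*?)</THINK>\s*<RESPONSE>(.*?)</RESPONSE>", re.DOTALL)

def split_cot(reply: str):
    """Return (think, response), or None so the caller can re-query a malformed reply."""
    match = COT_RE.search(reply)
    if match is None:
        return None
    return match.group(1).strip(), match.group(2).strip()

In practice the split step doubles as a cheap format check: replies that do not match the expected layout can be regenerated before the stage-3 review described in Sec. A.3.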
+
  },
  {
   "type": "page_number",
   "bbox": [
    0.491,
    0.936,
    0.509,
    0.948
   ],
   "angle": 0,
   "content": "19"
  }
 ],
 [
  {
   "type": "title",
   "bbox": [
    0.173,
    0.092,
    0.672,
    0.107
   ],
   "angle": 0,
   "content": "A.3 Prompt of Stage 3 when Processing Data (Sample from AudioSet)"
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.12,
    0.803,
    0.189
   ],
   "angle": 0,
   "content": "We are data reviewers. Next, you will receive an audio clip, along with its description, questions, answers, and most importantly, the thought process for solving the problems. Please determine and analyze whether all of these elements are completely correct, especially check if there are any hallucinations in the thought process. Return if there are no issues, and if there are errors in the data."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.19,
    0.588,
    0.205
   ],
   "angle": 0,
   "content": "Here is the description of the audio: *** caption here ***."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.205,
    0.495,
    0.218
   ],
   "angle": 0,
   "content": "Here is the question: *** question here ***."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.218,
    0.478,
    0.231
   ],
   "angle": 0,
   "content": "Here is the answer: *** answer here ***."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.232,
    0.603,
    0.246
   ],
   "angle": 0,
   "content": "And here is the thought process: *** COT process here ***."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.246,
    0.799,
    0.273
   ],
   "angle": 0,
   "content": "Please conduct a thorough judgment and analysis and provide the result in the specified format."
  },
  {
   "type": "title",
   "bbox": [
    0.172,
    0.295,
    0.505,
    0.312
   ],
   "angle": 0,
   "content": "B Synthetic Data Generation Pipeline"
  },
  {
   "type": "title",
   "bbox": [
    0.173,
    0.319,
    0.412,
    0.334
   ],
   "angle": 0,
   "content": "B.1 Synthetic Data Introduction"
  },
  {
   "type": "text",
   "bbox": [
    0.171,
    0.337,
    0.827,
    0.462
   ],
   "angle": 0,
   "content": "Multi-Speaker Dataset: To enhance the model's ability to comprehend complex, multi-turn conversations among multiple speakers, we constructed the Multi-Speaker dataset using text-to-speech (TTS) technology. The dataset generation process consists of three steps: (1) Conversation Text Generation: We utilized commercial models to generate diverse multi-speaker conversation texts covering a wide range of scenarios. (2) Speech Synthesis: Leveraging all available timbres from LibriSpeech [Panayotov et al., 2015] as prompts, we employed the CosyVoice2 [Du et al., 2024] framework to synthesize high-quality speech samples. (3) Dataset Assembly: The synthesized speech samples, featuring distinct timbres, were carefully combined to create a rich and diverse multi-speaker conversation dataset."
  },
  {
   "type": "text",
   "bbox": [
    0.171,
    0.468,
    0.825,
    0.497
   ],
   "angle": 0,
   "content": "This approach ensures that the dataset effectively trains the model to navigate intricate speaker interactions in natural conversations."
  },
  {
   "type": "text",
   "bbox": [
    0.171,
    0.503,
    0.825,
    0.628
   ],
   "angle": 0,
   "content": "Complex Audio Dataset: To strengthen the model's ability to process intricate audio compositions, we developed the Complex Audio dataset. The audio materials are sourced from AudioSet, AudioCaps, and our internal material library. The dataset consists of two primary types of audio data: (1) Temporally Complex Audio: Short audio clips were sequentially arranged, requiring the model to perform tasks such as sorting or counting based on temporal relationships. (2) Composite Complex Audio: Multiple long-form audio tracks, such as engine sounds and musical instruments, were blended. The model was then tasked with extracting specific target sounds from the composite audio, enhancing its ability to analyze overlapping audio sources."
  },
  {
   "type": "text",
   "bbox": [
    0.171,
    0.633,
    0.825,
    0.663
   ],
   "angle": 0,
   "content": "This dataset ensures that the model is well-equipped to handle real-world audio challenges, such as distinguishing between multiple concurrent sounds and identifying temporal patterns."
  },
  {
   "type": "text",
   "bbox": [
    0.171,
    0.667,
    0.825,
    0.698
   ],
   "angle": 0,
   "content": "To better understand what the Multi-Speaker and Complex Audio datasets are respectively tasked with, we provide examples in Sec. B.2 and Sec. B.3."
  },
  {
   "type": "page_number",
   "bbox": [
    0.49,
    0.936,
    0.509,
    0.948
   ],
   "angle": 0,
   "content": "20"
  }
 ],
 [
  {
   "type": "title",
   "bbox": [
    0.173,
    0.092,
    0.476,
    0.107
   ],
   "angle": 0,
   "content": "B.2 Sample from Complex Audio Dataset"
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.121,
    0.272,
    0.134
   ],
   "angle": 0,
   "content": ""
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.135,
    0.303,
    0.148
   ],
   "angle": 0,
   "content": ""
  },
  {
   "type": "list",
   "bbox": [
    0.197,
    0.121,
    0.303,
    0.148
   ],
   "angle": 0,
   "content": null
  },
  {
   "type": "text",
   "bbox": [
    0.196,
    0.149,
    0.8,
    0.203
   ],
   "angle": 0,
   "content": "The question is about identifying the sounds in the audio and arranging them in the correct order. I will carefully listen to the audio multiple times, noting down the sounds as I recognize them. Then, I will compare the identified sound sequence with the provided answer options to choose the one that matches best."
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.204,
    0.308,
    0.216
   ],
   "angle": 0,
   "content": ""
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.218,
    0.802,
    0.272
   ],
   "angle": 0,
   "content": "Upon initial listening, I can discern sounds that seem like a shattering noise, an animal sound which I think sounds like a cow, a cat sound, and what sounds like some mechanical tools working, perhaps a chainsaw. I shall listen again to clarify the sounds and their order."
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.273,
    0.295,
    0.285
   ],
   "angle": 0,
   "content": ""
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.286,
    0.72,
    0.301
   ],
   "angle": 0,
   "content": "1. After relistening, the first sound is definitely glass breaking."
  },
  {
   "type": "text",
   "bbox": [
    0.197,
    0.301,
    0.518,
    0.315
   ],
   "angle": 0,
   "content": "2. The second sound is definitely a cow mooing."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.315,
    0.44,
    0.328
   ],
   "angle": 0,
   "content": "3. The third sound is a cat meowing."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.328,
    0.425,
    0.341
   ],
   "angle": 0,
   "content": "4. The fourth sound is a chainsaw."
  },
  {
   "type": "text",
   "bbox": [
    0.198,
    0.342,
    0.769,
    0.356
   ],
   "angle": 0,
   "content": "5. 
I compare this sequence (glass break, cow, cat, chainsaw) with the options provided." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.356, + 0.486, + 0.369 + ], + "angle": 0, + "content": "6. Option (a) matches the order I identified." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.37, + 0.318, + 0.382 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.383, + 0.799, + 0.411 + ], + "angle": 0, + "content": "Based on the detailed listening and analysis, the correct order of sounds in the audio is: glass break, cow, cat, chainsaw. This matches option (a)." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.411, + 0.307, + 0.423 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.424, + 0.274, + 0.437 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.438, + 0.301, + 0.451 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.452, + 0.308, + 0.465 + ], + "angle": 0, + "content": "The answer is a." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.466, + 0.306, + 0.479 + ], + "angle": 0, + "content": "" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.301, + 0.799, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.47, + 0.107 + ], + "angle": 0, + "content": "B.3 Sample from Multi-Speaker Dataset" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.121, + 0.272, + 0.134 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.135, + 0.303, + 0.148 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.149, + 0.8, + 0.204 + ], + "angle": 0, + "content": "The user asks me to identify the number of speakers in the audio clip. To do this, I will listen carefully and try to distinguish unique voices. The conversation is between two hikers, I need to confirm this by identifying their individual lines and vocal characteristics. Finally, I will provide the answer based on the number of identified speakers." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.205, + 0.308, + 0.216 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.218, + 0.291, + 0.23 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.232, + 0.636, + 0.245 + ], + "angle": 0, + "content": "The audio presents a conversation between two individuals hiking." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.246, + 0.802, + 0.341 + ], + "angle": 0, + "content": "Speaker 1: This person starts by commenting on the intensity of the climb and expresses doubt about continuing without a break. Later, after a reminder about the ranger's warning, this speaker agrees to turn back. The vocal tone suggests exertion and a desire to reach the summit, eventually giving way to agreement. Phrases spoken: \"Wow, this climb is intense. I am not sure I can go much further without a serious break,\" \"Turn back. But we're so close to the summit. I really want to make it, you know,\" \"Unpredictable mountain weather, okay. Okay, you are right.\"" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.342, + 0.803, + 0.424 + ], + "angle": 0, + "content": "Speaker 2: This person responds to the first speaker's concerns, suggesting they turn back due to the darkening sky and diminishing daylight. 
They emphasize safety and remind the other speaker about the ranger's warning. The vocal tone conveys concern and prioritizes safety. Phrases spoken: \"I know right. I am starting to think we should turn back. That sky looks awfully dark, and we're losing daylight quickly,\" \"I hear you, but safety first. Remember what the ranger said about the unpredictable mountain weather,\" \"So, turn back.\"" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.424, + 0.801, + 0.452 + ], + "angle": 0, + "content": "Based on the clear distinction in vocal tones and the back-and-forth nature of the dialogue, I can confirm that there are two distinct speakers in this audio clip." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.453, + 0.295, + 0.465 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.466, + 0.313, + 0.478 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.479, + 0.802, + 0.52 + ], + "angle": 0, + "content": "Initial Assessment: The audio starts with two distinct voices in a hiking - related dialogue. The voices are clearly distinguishable from the get - go, with one having a deeper timbre and the other a higher - pitched one." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.521, + 0.802, + 0.575 + ], + "angle": 0, + "content": "Dialogue Analysis: A close look at the content shows a clear difference in the speakers' stances. One is eager to press on to the summit, saying, \"We're almost there. Let's keep going.\" The other is more cautious, citing the darkening sky and fading daylight, stating, \"The weather looks bad. We should turn back.\"" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.576, + 0.801, + 0.618 + ], + "angle": 0, + "content": "Vocal Tone Differentiation: Each speaker's vocal tone stays consistent. The eager speaker has an energetic and determined tone, with a rising pitch when advocating for continuing. The cautious speaker has a measured, worried tone, with a lower pitch when warning of danger." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.618, + 0.802, + 0.631 + ], + "angle": 0, + "content": "Confirming Speaker Count: A review of the audio shows no extra voices or interruptions." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.632, + 0.801, + 0.645 + ], + "angle": 0, + "content": "The conversation moves smoothly between the two speakers, indicating only two are present." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.646, + 0.745, + 0.658 + ], + "angle": 0, + "content": "Final Answer: Based on this analysis, there are two individuals in the conversation." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.659, + 0.346, + 0.671 + ], + "angle": 0, + "content": "Thus, the answer is B." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.672, + 0.318, + 0.685 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.686, + 0.303, + 0.699 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.7, + 0.802, + 0.714 + ], + "angle": 0, + "content": "Based on the dialogue and vocal tone analysis, there are two distinct speakers in the audio." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.714, + 0.312, + 0.727 + ], + "angle": 0, + "content": "The answer is B." 
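Editor's note: the Complex Audio construction described in Sec. B.1 (and sampled in Sec. B.2 above) is straightforward to reproduce in outline. Below is a minimal sketch of the Temporally Complex Audio case under stated assumptions: clips are mono WAV files at a shared sample rate, numpy and soundfile are available, and the returned label order serves as the answer key for question generation. This is illustrative only, not the authors' released pipeline.

import random
import numpy as np
import soundfile as sf  # assumed dependency; any WAV I/O library would do

def make_temporal_sample(clip_paths, out_path, sr=16000):
    """Concatenate labeled clips in a random order; return the ground-truth order.

    clip_paths: dict mapping a label (e.g., "cow") to a mono WAV path at rate sr.
    """
    labels = list(clip_paths)
    random.shuffle(labels)  # the shuffled order becomes the answer key
    pieces = []
    for label in labels:
        audio, rate = sf.read(clip_paths[label], dtype="float32")
        assert rate == sr, "resample clips to a common rate first"
        pieces.append(audio)
        pieces.append(np.zeros(int(0.3 * sr), dtype="float32"))  # short gap between events
    sf.write(out_path, np.concatenate(pieces), sr)
    return labels  # e.g., ["glass_break", "cow", "cat", "chainsaw"]

The Composite Complex Audio case would replace the concatenation with an overlay (summing aligned tracks and normalizing), with the mixed-in source labels kept as targets for the extraction questions.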
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.728, + 0.308, + 0.74 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.741, + 0.275, + 0.754 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.755, + 0.302, + 0.768 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.769, + 0.801, + 0.796 + ], + "angle": 0, + "content": "Based on the dialogue and distinct vocal tones present in the audio, I have identified two individuals engaged in the conversation. Therefore, the answer is B." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.797, + 0.306, + 0.809 + ], + "angle": 0, + "content": "" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.09, + 0.423, + 0.108 + ], + "angle": 0, + "content": "C Further Dataset Analysis" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.123, + 0.825, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.356, + 0.825, + 0.386 + ], + "angle": 0, + "content": "Figure 6: Detailed information of the CoTA dataset: The maximum, minimum, and average number of words in the answers, as well as the average length of the audio." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.483 + ], + "angle": 0, + "content": "CoTA's reasoning adaptability is also evident in its distribution of token lengths and audio duration across tasks in Figure 6. The dataset accommodates a broad range of reasoning complexities, with token lengths spanning 327 (CoVoST2) to 1,454 (Multi-Speaker), ensuring coverage of both concise and highly intricate reasoning processes. Notably, tasks requiring deep logical inference, such as complex audio, exhibit a well-balanced token distribution \\((\\max = 423, \\text{avg} = 192.96)\\), supporting structured multi-step reasoning without unnecessary redundancy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.488, + 0.827, + 0.6 + ], + "angle": 0, + "content": "Further, the dataset's average token lengths (164.48-481.57) align with task difficulty: longer reasoning chains characterize tasks such as sound description (AudioSet: 395.26) and music understanding (MusicBench: 463.89). Meanwhile, CoTA ensures practical generalization by maintaining audio durations between 2.85s and 26.34s, where shorter clips (e.g., MELD: 2.84s) support concise context-dependent reasoning, while extended sequences (Multi-Speaker: 26.34s) enable complex multi-turn inference. This systematic variation in reasoning depth and audio granularity ensures adaptability across diverse tasks, addressing the limitation of one-size-fits-all reasoning chains in existing audio datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.368, + 0.106 + ], + "angle": 0, + "content": "D More Case Studies" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.143, + 0.241, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.178, + 0.236, + 0.188 + ], + "angle": 0, + "content": "User" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.149, + 0.745, + 0.201 + ], + "angle": 0, + "content": "Analyze the interplay between the vocal sample, synth elements, and percussion within the track. 
How do these elements contribute to the overall texture and dynamic progression of the piece, particularly focusing on the shift from the intro to the main rhythm?" + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.203, + 0.461, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.754, + 0.238, + 0.795, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.748, + 0.273, + 0.805, + 0.293 + ], + "angle": 0, + "content": "Audio Reasoner" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.267, + 0.29, + 0.276 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.277, + 0.721, + 0.332 + ], + "angle": 0, + "content": "The user is asking us to identify potential influences or subgenres within rock music based on the provided instrumental track's instrumentation, chord progression, and overall sonic character. I will start with analyzing the chord progression, instrumentation, and overall sonic character of the music track. Then I will use these analyses to deduce potential influences and subgenres." + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.342, + 0.721, + 0.399 + ], + "angle": 0, + "content": "The music features a rock-oriented instrumental piece. The lead melody is carried by an electric guitar, featuring a slight overdrive or distortion. Acoustic rhythm guitars provide harmonic support, panned to the left and right. A slap-style bass guitar and acoustic drums form the rhythm section. The song follows a medium tempo in 4/4 time, with a chord progression of Bb, Ab6, F, and Ab6." + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.409, + 0.722, + 0.53 + ], + "angle": 0, + "content": " The chord progression, Bb, Ab6, F, Ab6, has blues rock similarities because it includes flat 7 chords. Electric guitar with overdrive or distortion, a driving rhythm section (bass and drums), and acoustic rhythm guitars are characteristics of rock music. Slap bass is used in funk music. Given these considerations, several potential influences and subgenres present themselves. The blues and rock elements of the chord progression could indicate the influence of blues rock and hard rock, which often incorporate blues-based progressions with louder, more aggressive instrumentation. The slap bassline suggests a funk influence, which can be found in funk rock and other rock subgenres that incorporate funk elements. The overall sonic character is most indicative of rock, with potential cross-pollination from blues and funk." + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.541, + 0.717, + 0.585 + ], + "angle": 0, + "content": "Based on the chord progression of Bb, Ab6, F, Ab6, the prominent electric guitar solo with overdrive, the acoustic rhythm guitars, the slap-style bass, and the acoustic drums, the music has potential influences or subgenres within rock music. These may include blues rock, hard rock, and funk rock." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.587, + 0.293, + 0.596 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.598, + 0.321, + 0.607 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.608, + 0.724, + 0.661 + ], + "angle": 0, + "content": "Considering the instrumentation, the chord progression (Bb, Ab6, F, Ab6), and the overall sonic character of the piece, this instrumental track may draw upon influences from several subgenres within rock music. 
The chord progression has blues and rock similarities, pointing towards blues-rock and hard-rock elements. Moreover, the slap bassline indicates the touch of funk." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.664, + 0.324, + 0.673 + ], + "angle": 0, + "content": "" + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.716, + 0.736, + 0.731 + ], + "angle": 0, + "content": "Figure 7: Example of Audio-Reasoner answering music-based question." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.199, + 0.217, + 0.241, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.253, + 0.236, + 0.262 + ], + "angle": 0, + "content": "User" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.223, + 0.751, + 0.263 + ], + "angle": 0, + "content": "Considering the sounds present in the audio, describe the likely environment in which the dog is located and detail the types of interactions or activities the dog might be engaged in." + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.264, + 0.462, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.321, + 0.289, + 0.33 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.331, + 0.71, + 0.365 + ], + "angle": 0, + "content": "The question asks about the dog's environment and activities based on the audio. I need to carefully analyze the audio to identify relevant sounds, infer the setting, and deduce potential interactions." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.375, + 0.724, + 0.442 + ], + "angle": 0, + "content": "The audio contains dog barks of varying intensity and pitch. There are also sounds suggesting movement, such as rustling or the distinct sounds of the dog moving on what sounds to be hard floor surfaces. These movement sounds occur both during and between the barking, with the barks themselves being the main focus of sound. The pitch of the bark changes at different points, from higher to lower and back again." + }, + { + "type": "title", + "bbox": [ + 0.27, + 0.453, + 0.358, + 0.462 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.464, + 0.709, + 0.508 + ], + "angle": 0, + "content": "1. **Environment Inference:** The presence of hard floor surface sounds, coupled with the absence of specific outdoor environmental noises (e.g., wind, traffic, birdsong), leans towards an indoor location. The changing pitch and tone of barks show the arousal level is rapidly changing." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.508, + 0.697, + 0.54 + ], + "angle": 0, + "content": "2. **Activity Inference:** The barking, interspersed with the sound of movement, indicates the dog is active. The varied bark intensity could suggest excitement, playfulness, or even alert behavior in relation to other sounds." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.541, + 0.721, + 0.574 + ], + "angle": 0, + "content": "3. **Interaction Possibilities:** Based on the sounds alone, it's difficult to pinpoint the exact interaction. It could be playing with a toy, reacting to someone entering the room, or engaging in a game." 
+ }, + { + "type": "list", + "bbox": [ + 0.269, + 0.464, + 0.721, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.585, + 0.721, + 0.639 + ], + "angle": 0, + "content": "\\nThe dog is likely indoors on a hard floor surface. The audio suggests the dog is actively moving and barking, possibly engaged in play or reacting to something in its immediate surroundings. The dog is probably reacting to a stimulus within its environment, resulting in a change in the dog's arousal and tone and pitch of barks." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.641, + 0.292, + 0.65 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.652, + 0.321, + 0.661 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.662, + 0.721, + 0.728 + ], + "angle": 0, + "content": "Based on the audio, the dog is likely located in an indoor environment with hard floor surface sounds. The varied intensity and sound of the dog's barks, combined with the sound of its movement, indicate the dog is actively moving and vocalizing, and that the arousal level of the dog is actively changing. Given this information, the dog might be engaged in some activity or play, perhaps reacting to a toy or another person within the room." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.729, + 0.324, + 0.739 + ], + "angle": 0, + "content": "" + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.791, + 0.736, + 0.807 + ], + "angle": 0, + "content": "Figure 8: Example of Audio-Reasoner answering sound-based question." + }, + { + "type": "image", + "bbox": [ + 0.755, + 0.304, + 0.796, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.339, + 0.805, + 0.36 + ], + "angle": 0, + "content": "Audio Reasoner" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.199, + 0.217, + 0.242, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.253, + 0.236, + 0.262 + ], + "angle": 0, + "content": "User" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.223, + 0.625, + 0.236 + ], + "angle": 0, + "content": "What did the second person say about the rose bush?" + }, + { + "type": "text", + "bbox": [ + 0.288, + 0.237, + 0.381, + 0.248 + ], + "angle": 0, + "content": "(a) They like it." + }, + { + "type": "text", + "bbox": [ + 0.288, + 0.249, + 0.499, + 0.261 + ], + "angle": 0, + "content": "(b) It's the bane of their existence." + }, + { + "type": "text", + "bbox": [ + 0.288, + 0.262, + 0.451, + 0.274 + ], + "angle": 0, + "content": "(c) It needs more fertilizer." + }, + { + "type": "text", + "bbox": [ + 0.288, + 0.275, + 0.455, + 0.288 + ], + "angle": 0, + "content": "(d) It provides good shade." + }, + { + "type": "list", + "bbox": [ + 0.288, + 0.237, + 0.499, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.29, + 0.461, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.332, + 0.29, + 0.342 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.343, + 0.717, + 0.398 + ], + "angle": 0, + "content": "The question asks about what the second person said about the rose bush. The audio presents a conversation between two neighbors, Amelia and Ben. 
I need to identify who the second speaker is and what they said about the rose bush. Then, I need to match what the second speaker said with the given options." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.409, + 0.714, + 0.431 + ], + "angle": 0, + "content": "The audio presents a conversation between Amelia and Ben about the upcoming block party and a troublesome rose bush." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.431, + 0.718, + 0.475 + ], + "angle": 0, + "content": "**Speaker 1 (Amelia)**: Asks Ben if he's attending the block party and mentions the rose bush causing him trouble. She says \"You mentioned weedies, that rose bush still causing you trouble, the one that keeps growing over the fence?\". She also mentions she has been scratched by it." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.476, + 0.718, + 0.53 + ], + "angle": 0, + "content": "**Speaker 2 (Ben)**: Confirms he's attending the block party, entering his chili in the cook-off, and complains about the rose bush. He says \"That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble.\" and suggests chopping it down. \\(\\backslash\\)n\\`nTherefore, Ben is the second speaker." + }, + { + "type": "title", + "bbox": [ + 0.269, + 0.541, + 0.358, + 0.551 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.552, + 0.714, + 0.563 + ], + "angle": 0, + "content": "1. **Identify the second speaker:** Based on the audio, Ben is the second speaker." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.564, + 0.714, + 0.596 + ], + "angle": 0, + "content": "2. **Identify what Ben said about the rose bush:** Ben says \"That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble.\" and suggests chopping it down." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.597, + 0.515, + 0.607 + ], + "angle": 0, + "content": "3. **Match Ben's statement with the options:**" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.608, + 0.685, + 0.618 + ], + "angle": 0, + "content": "- (a) They like it. - This is incorrect because Ben clearly dislikes the rose bush." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.619, + 0.692, + 0.629 + ], + "angle": 0, + "content": "- (b) It's the bane of their existence. - This aligns perfectly with Ben's statement." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.63, + 0.61, + 0.64 + ], + "angle": 0, + "content": "- (c) It needs more fertilizer. - This is not mentioned in the audio." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.641, + 0.64, + 0.651 + ], + "angle": 0, + "content": "- (d) It provides good shade. - This is also not mentioned in the audio." + }, + { + "type": "list", + "bbox": [ + 0.269, + 0.552, + 0.714, + 0.651 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.652, + 0.551, + 0.662 + ], + "angle": 0, + "content": "Therefore, the correct answer is (b)." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.673, + 0.705, + 0.706 + ], + "angle": 0, + "content": "\\nBased on the audio, the second speaker, Ben, describes the rose bush as \"the bane of my existence\" and complains about it scratching him. Therefore, the answer is (b). 
" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.707, + 0.293, + 0.717 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.718, + 0.321, + 0.728 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.729, + 0.692, + 0.75 + ], + "angle": 0, + "content": "The second person, Ben, explicitly states that the rose bush \\(\"is the bane of my existence\"\\). Therefore, the answer is B." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.751, + 0.325, + 0.761 + ], + "angle": 0, + "content": "" + }, + { + "type": "image_caption", + "bbox": [ + 0.21, + 0.791, + 0.785, + 0.807 + ], + "angle": 0, + "content": "Figure 9: Example of Audio-Reasoner answering multi-speaker speech-based question." + }, + { + "type": "image", + "bbox": [ + 0.754, + 0.334, + 0.796, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.754, + 0.368, + 0.793, + 0.377 + ], + "angle": 0, + "content": "Audio" + }, + { + "type": "image_caption", + "bbox": [ + 0.746, + 0.379, + 0.805, + 0.389 + ], + "angle": 0, + "content": "Reasoner" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.199, + 0.217, + 0.241, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.253, + 0.236, + 0.262 + ], + "angle": 0, + "content": "User" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.223, + 0.71, + 0.237 + ], + "angle": 0, + "content": "Given that you hear buzzing plastic, identify the background sound?" + }, + { + "type": "text", + "bbox": [ + 0.289, + 0.237, + 0.486, + 0.249 + ], + "angle": 0, + "content": "(a). A washing machine running" + }, + { + "type": "text", + "bbox": [ + 0.289, + 0.25, + 0.466, + 0.261 + ], + "angle": 0, + "content": "(b). Rain falling on a window" + }, + { + "type": "text", + "bbox": [ + 0.289, + 0.263, + 0.41, + 0.274 + ], + "angle": 0, + "content": "(c). A toilet flushing" + }, + { + "type": "text", + "bbox": [ + 0.289, + 0.276, + 0.484, + 0.288 + ], + "angle": 0, + "content": "(d). Waves crashing on a shore" + }, + { + "type": "list", + "bbox": [ + 0.289, + 0.237, + 0.486, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.29, + 0.462, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.331, + 0.29, + 0.342 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.27, + 0.343, + 0.348, + 0.352 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.354, + 0.723, + 0.409 + ], + "angle": 0, + "content": "I will carefully and methodically approach the task of identifying the background sound in the audio clip. This will allow me to focus solely on the characteristics of the background noise. Once the background sound is isolated, I will analyze it in detail to determine its key qualities, such as its consistency, rhythm, and texture. Afterward, I will compare these characteristics to the provided answer options." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.42, + 0.718, + 0.486 + ], + "angle": 0, + "content": " Upon my initial listening, I observed that the background sound possesses characteristics that suggest fluid movement. There is a continuous, almost rhythmic, sound with subtle gurgling and sloshing. This could indicate the presence of water or some other flowing liquid. 
As I continue to listen more carefully, the flow becomes even more evident, suggesting a consistent rush or stream-like sound. " + }, + { + "type": "title", + "bbox": [ + 0.27, + 0.497, + 0.357, + 0.507 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.509, + 0.718, + 0.552 + ], + "angle": 0, + "content": "1. **Sound Characteristics:** My first observation is that the background sound features a continuous, almost rhythmic quality, with faint gurgling noises. These characteristics often point to a fluid in motion. Upon further listening, I recognize that the sound might be created by a liquid flowing from one place to another." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.553, + 0.72, + 0.585 + ], + "angle": 0, + "content": "2. **Detailed Listening:** I focus on the quieter sections of the sound, where the fluid dynamics become clearer. The consistent, almost rushing quality suggests the movement of water rather than irregular drops or splashes." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.585, + 0.721, + 0.64 + ], + "angle": 0, + "content": "3. **Comparison to Options:** With this in mind, I now compare the sound characteristics to the provided answer options. - \"A toilet flushing,\" which often involves a steady rush of water with gurgling and sloshing as the water flows through pipes. This closely matches the consistent flow and gurgling noises I hear in the background." + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.641, + 0.712, + 0.673 + ], + "angle": 0, + "content": "4. **Conclusion:** Based on my analysis, the sound best aligns with the description of a toilet flushing, due to the nature of the consistent, rushing water and subtle gurgling sounds. " + }, + { + "type": "list", + "bbox": [ + 0.269, + 0.509, + 0.721, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.674, + 0.292, + 0.683 + ], + "angle": 0, + "content": "" + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.684, + 0.321, + 0.694 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.696, + 0.718, + 0.75 + ], + "angle": 0, + "content": "After listening to the audio carefully, isolating the background sound, and comparing it to the answer options, I concluded that the background sound most closely matches the sound of a toilet flushing. The key factors leading to this conclusion include the continuous rush of water and the gurgling quality, which are typical of a flushing toilet." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.751, + 0.323, + 0.761 + ], + "angle": 0, + "content": "" + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.791, + 0.769, + 0.807 + ], + "angle": 0, + "content": "Figure 10: Example of Audio-Reasoner answering complex-audio-based question." 
+ }, + { + "type": "image", + "bbox": [ + 0.754, + 0.334, + 0.796, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.755, + 0.368, + 0.793, + 0.378 + ], + "angle": 0, + "content": "Audio" + }, + { + "type": "image_caption", + "bbox": [ + 0.747, + 0.38, + 0.805, + 0.389 + ], + "angle": 0, + "content": "Reasoner" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "27" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf b/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..24ee807e635d6f0d90327cac93970eb6ad947cf4 --- /dev/null +++ b/data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7080d659f390283f0293a487f17dc6aaf0613e1f6e888d3f9906febfd7a48754 +size 2117618 diff --git a/data/2025/2503_02xxx/2503.02318/full.md b/data/2025/2503_02xxx/2503.02318/full.md new file mode 100644 index 0000000000000000000000000000000000000000..83b8ac105de1f2c056d791f044141f1d37823cfc --- /dev/null +++ b/data/2025/2503_02xxx/2503.02318/full.md @@ -0,0 +1,720 @@ +# Audio-Reasoner: Improving Reasoning Capability in Large Audio Language Models + +Zhifei Xie $^{1\dagger}$ Mingbao Lin $^{3\dagger}$ Zihang Liu $^{2\dagger}$ + +Pengcheng Wu1 Shuicheng Yan2† Chunyan Miao1‡ + +$^{1}$ Nanyang Technological University $^{2}$ National University of Singapore $^{3}$ Rakuten + +†Equal Contributions ‡Corresponding Authors + +zhifei001@e.ntu.edu.sg linmb001@outlook.com liuzihang99@gmail.com + +pengchengwu@ntu.edu.sg yansc@nus.edu.sg ascymiao@ntu.edu.sg + +Project: https://github.com/xzf-thu/Audio-Reasoner + +# Abstract + +Recent advancements in multimodal reasoning have largely overlooked the audio modality. We introduce Audio-Reasoner, a large-scale audio language model for deep reasoning in audio tasks. We curate a diverse collection of multi-task audio datasets with simple annotations, refining them through structured secondary labeling and complex question generation. Additionally, We utilize advanced closed-source models to generate structured reasoning chains, transforming raw annotations into a formatted inference process. Following inference scaling principles, we train Audio-Reasoner on CoTA, a high-quality reasoning dataset with 1.2 million reasoning-rich samples. Experiments show state-of-the-art performance across key benchmarks, including MMAU-mini $(+25.42\%)$ , AIR-Bench chat/foundation $(+14.57\% / + 10.13\%)$ , and MELD $(+8.01\%)$ . Our findings stress the core of structured CoT training in advancing audio reasoning. + +![](images/6b8d76aec6a568542a810d60105df57d132003453be82af5476ad0dd56265004.jpg) +Figure 1: Benchmark performance of Audio-Reasoner on tasks of speech emotion recognition (SER), speech-to-text translation (S2TT), MMAU, and AIR-Bench chat. S2TT is measured using BLEU while SER and MMAU are measured ACC (left axis). AIR-Bench is scored by GPT (right axis). + +# 1 Introduction + +Recent advancements in large language models (LLMs) have significantly enhanced their reasoning capabilities, driven by innovations like chain-of-thought (CoT) and inference scaling. 
Models such as OpenAI's o1 [Jaech et al., 2024] and Deepseek-R1 [Guo et al., 2025] have demonstrated significant improvements, especially in tasks like mathematics and coding [Team et al., 2025, Zhao et al., 2024a, Muennighoff et al., 2025, Liu et al., 2024a, Zhang et al., 2024b, Deng et al., 2024, Hui et al., 2024, Yang et al., 2024a]. These models have set a precedent for achieving "deep thinking" by tackling complex, structured tasks. The CoT framework, in particular, has been successfully applied to multimodal models, improving their cognitive abilities in image and video reasoning. Models such as Visual-CoT [Shao et al., 2024a], LLaVA-Reasoner [Zhang et al., 2024a], and MAmmoTH-VL [Guo et al., 2024] have demonstrated that large-scale datasets and multidimensional reasoning can enhance model performance, even in tasks involving images and videos [Zou et al., 2023]. Other works, like Mulberry [Wen et al., 2019] and Image-of-Thought [Zhou et al., 2024], integrate reflective and image-editing tools to further refine multimodal reasoning, offering new pathways for understanding complex queries across multiple modalities.

However, the application of CoT in the audio modality has been largely underexplored. Although models like Audio Flamingo [Kong et al., 2024], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024] have pushed the boundaries of large audio language models (LALMs), these advancements have not yet incorporated CoT reasoning at scale. Recent research [Ma et al., 2025] suggests that existing CoT methods, such as zero-shot reasoning in audio tasks, fail to significantly improve performance on more complex queries. This limitation is largely attributed to the simplicity of existing audio datasets—such as AudioSet [Gemmeke et al., 2017], AudioCaps [Kim et al., 2019], and Clotho [Drossos et al., 2020]—which predominantly feature short, simple labels. These simplified datasets hinder the development of LALMs capable of more intricate reasoning. Without richer, more complex data, these models struggle with long-form reasoning, and the application of CoT often leads to severe hallucinations and degraded performance. Therefore, advancing CoT in LALMs necessitates overcoming these dataset limitations to allow for more effective, deep reasoning.

To address the challenges in audio-based reasoning, we propose a scalable and effective approach to generating high-quality pretraining data. Using state-of-the-art commercial models, we introduce CoTA, a large-scale dataset containing 1.2 million refined captions and question-answer (QA) pairs. CoTA spans multiple datasets and tasks, undergoing rigorous filtering to ensure diversity and quality. Building on CoTA, we develop Audio-Reasoner, a large audio language model designed for long-context reasoning. Audio-Reasoner is trained with a 4K token context window and generates structured CoT reasoning whose length can exceed 1K tokens in real-world tasks. The model is fine-tuned on CoTA using supervised fine-tuning, adhering to a structured reasoning framework, as illustrated in Figure 2: (1) Planning—Identifies key problem components from the user query and formulates a structured sequence of reasoning steps essential for deriving an answer. (2) Caption—Extracts and integrates relevant multimodal content from the input to enrich the reasoning process. (3) Reasoning—Executes a systematic, step-by-step reasoning procedure to ensure logical coherence and accuracy.
(4) Summary—Synthesizes the reasoning process into a final response that is concise, contextually grounded, and precise.

Our experimental results, partially presented in Figure 1, demonstrate the effectiveness of Audio-Reasoner. More comprehensively, we evaluate the model across multiple benchmarks: MMAU-mini [Sakshi et al., 2024], a dataset with 1,500 closed-choice questions testing reasoning across sound, speech, and music; AIR-Bench [Yang et al., 2024b], covering various types of audio signals including human speech, natural sounds, and music; CoVoST2 (zh-en) [Wang et al., 2021], speech-to-text translation between Chinese and English; and MELD [Poria et al., 2019], emotion classification. Compared to Qwen2-Audio-Instruct [Chu et al., 2024], Audio-Reasoner achieves a $+25.4\%$ improvement on MMAU-mini, with reasoning subtask gains of $+44.4\%$, $+26.1\%$, and $+9.3\%$; $+14.6\%$ gains on AIR-Bench chat; $+30.6\%$ on CoVoST2 (ZN/EN subset, average BLEU score); and $+8.01\%$ on MELD. These results validate the effectiveness of our approach in advancing long-context reasoning and inference scaling for audio models.

The major contributions we have made in this paper include:

- We propose Audio-Reasoner, designed for deep reasoning and inference scaling in the audio modality. Built upon Qwen2-Audio and fine-tuned with structured CoT training, Audio-Reasoner significantly improves long-context reasoning across diverse audio tasks.
- We develop CoTA, a large-scale dataset with 1.2 million high-quality captions and QA pairs, spanning multiple audio domains. The dataset enables structured reasoning and enhances audio-language pretraining.
- We introduce a scalable data generation pipeline leveraging advanced commercial models to produce complex reasoning-based QA pairs and structured CoT annotations, enriching model training.
- We achieve state-of-the-art performance, with $+25.4\%$ gains over Qwen2-Audio-Instruct on MMAU-mini, along with significant improvements in reasoning, translation, and emotion recognition tasks.

# 2 Related Work

Chain-of-Thought Reasoning. LLMs leverage in-context learning (ICL) to enhance their reasoning capabilities by processing prompts and context. This is further strengthened through CoT techniques. Various CoT methods have been explored, including Tree of Thoughts (ToT) [Yao et al., 2023], manual few-shot CoT [Wei et al., 2022], and automatically generated approaches [Jin et al., 2024, Zhang et al., 2022]. In addition, studies have delved into the necessity of CoT, its theoretical foundations, and its effectiveness across a wide range of tasks [Sprague et al., 2024, Stechly et al., 2024, Turpin et al., 2023]. The release of OpenAI's o1 model [Jaech et al., 2024] has sparked renewed interest in CoT research, significantly boosting the capabilities of LLMs, especially in multi-step reasoning tasks such as coding [Zhang et al., 2024b] and mathematics [Yang et al., 2024a], setting new performance benchmarks. CoT techniques have been integrated with other methods such as Monte Carlo Tree Search [Browne et al., 2012], reflection [Guo et al., 2025], and tool use [Qin et al., 2023], and are often trained alongside reinforcement learning approaches [Rafailov et al., 2023, Zeng et al., 2024, Engstrom et al., 2020, Shao et al., 2024b].

Multimodal Chain-of-Thought. CoT techniques have also been explored in the realm of multimodal large models.
For example, Visual-CoT [Shao et al., 2024a] incorporates object detection to aid in reasoning, while LLaVA-Reasoner [Zhang et al., 2024a] uses closed-source models for CoT fine-tuning through recaptioning. LLaVA-CoT [Xu et al., 2024] and MAmmoTH-VL [Guo et al., 2024] scale datasets to improve model performance. Other models, such as Mulberry [Wen et al., 2019], explore the application of reflective thinking, and Image-of-Thought [Zhou et al., 2024] integrates image editing tools to enhance reasoning. Video-related studies [Wang et al., 2024, Han et al., 2024, Fei et al., 2024, Tang et al., 2024] have demonstrated the effectiveness of CoT in reasoning tasks within the video domain. However, the application of CoT in the audio domain is still in its infancy. The study Audio-CoT [Ma et al., 2025] shows some improvement with zero-shot CoT in audio tasks, but it falls short on more complex problems. This paper aims to explore this gap further.

Large Audio Language Models. LALMs can be broadly categorized into two areas: audio understanding and real-time dialogue. Audio understanding models typically consist of a three-layer architecture—an encoder, a connector, and an LLM—focusing on specific domains, as seen in models like Mu-LLaMA [Liu et al., 2024b], LTU [Gong et al., 2023b], EmoBox [Ma et al., 2024], and GAMA [Ghosh et al., 2024]. Other models, such as LTU-AS [Gong et al., 2023a], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024], employ unified architectures designed for multi-task training. Real-time conversation models, which focus on speech input and extend transformers to real-time speech synthesis, are also gaining popularity [Zhang et al., 2023b, Xie and Wu, 2024a,b, Fu et al., 2025, Defossez et al., 2024]. However, despite their focus on understanding and rapid response, current LALMs still lack significant exploration into reasoning tasks such as CoT. This paper addresses this gap by investigating the application of CoT in LALMs.

# 3 Audio-Reasoner

In this section, we present the training methodology for our Audio-Reasoner model, designed to equip LALMs with structured, step-by-step reasoning in multimodal tasks. Unlike standard models, Audio-Reasoner generates both a chain-of-thought (CoT) and a final response, ensuring logical consistency and interpretability. We describe the training framework, which includes a structured CoT approach, a novel data generation pipeline, and the large-scale CoTA dataset. This dataset is carefully constructed through automated annotation, reasoning chain creation, and validation, setting the foundation for robust and explainable audio-based reasoning.

![](images/43b5b9c5fc69a44f947aab314c7528399913cc4f40054888654c56601c09c66b.jpg)
Figure 2: Comparison between Audio-Reasoner and Qwen2-Audio-Instruct: While Qwen2-Audio-Instruct produces brief and error-prone reasoning, our Audio-Reasoner uses a structured reasoning framework with distinct "thinking" and "response" phases, ensuring a more reliable and well-supported output through careful planning, information extraction, and step-by-step reasoning.

# 3.1 Model Training with Audio Reasoning

A standard large language model is trained to generate an output sequence $Y$ given an input sequence $X$. The probability distribution of the model's output is formulated as:

$$
P(Y \mid X; \theta) = f_{\theta}(X), \tag{1}
$$

where $f_{\theta}$ is a Transformer-based model parameterized by $\theta$.
The training objective follows a maximum likelihood estimation framework:

$$
\mathcal{L}(\theta) = -\sum_{i=1}^{N} \log P\left(Y_{i} \mid X_{i}; \theta\right). \tag{2}
$$

In our Audio-Reasoner, the input consists of an audio signal $A$ and a text-based query $Q$, forming the multimodal input representation:

$$
X = (A, Q). \tag{3}
$$

Unlike conventional LLMs, where the output is a single response, we structure the model's output into two distinct components: the chain-of-thought reasoning $C$, which captures the step-by-step logical process, and the final response $R$, which provides the ultimate answer. The model thus learns to generate the concatenation of $C$ and $R$, leading to the probability distribution:

$$
P(C, R \mid A, Q; \theta) = f_{\theta}(A, Q). \tag{4}
$$

To ensure explicit learning of both reasoning and final response generation, we construct a dataset defined as:

$$
\mathcal{D} = \left\{\left(A_{i}, Q_{i}, C_{i}, R_{i}\right)\right\}_{i=1}^{N}, \tag{5}
$$

where each training sample consists of an input audio signal $A_{i}$, its corresponding textual query $Q_{i}$, the structured reasoning process $C_{i}$, and the final answer $R_{i}$. This dataset formulation reinforces the model's ability to perform in-context learning and deep reasoning, ensuring that generated responses are not only accurate but also logically structured.

The training objective maximizes the likelihood of both $C$ and $R$, encouraging the model to first reason and then generate a response. The loss function is given by:

$$
\mathcal{L}(\theta) = -\sum_{i=1}^{N} \log P\left(C_{i}, R_{i} \mid A_{i}, Q_{i}; \theta\right). \tag{6}
$$

By optimizing this objective, Audio-Reasoner is trained to articulate a structured reasoning process before providing its final response. This approach enhances interpretability, reliability, and alignment with human reasoning.

At inference time, our Audio-Reasoner follows a structured reasoning pipeline, as illustrated in Figure 2. The reasoning process consists of four sequential steps: (1) Planning $(P)$: The model analyzes the query, identifies key problem components, and outlines the reasoning steps necessary to derive an answer. (2) Captioning $(C)$: Relevant multimodal content is extracted from the input, such as speech transcription, acoustic event detection, or context information. (3) Reasoning $(R)$: Based on the extracted content, the model performs structured, step-by-step reasoning. (4) Summary $(S)$: The model synthesizes its reasoning process into a final, concise, and accurate response. This structured inference process can be formalized as follows:

$$
P \sim f_{\theta}(A, Q), \tag{7}
$$

$$
C \sim f_{\theta}(A, Q, P), \tag{8}
$$

$$
R \sim f_{\theta}(A, Q, P, C), \tag{9}
$$

$$
S \sim f_{\theta}(A, Q, P, C, R). \tag{10}
$$

Compared to the direct-response counterpart [Chu et al., 2024], this approach provides two key advantages: Improved Interpretability—By explicitly modeling each reasoning step, the process becomes more transparent, making it easier to analyze and diagnose errors. Reduced Hallucinations—The structured reasoning pipeline mitigates speculative or incorrect responses, ensuring that outputs remain logically grounded.

Figure 2 illustrates the structured CoT reasoning process, highlighting how each stage contributes to the final response.
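In implementation terms, Eq. (6) is ordinary next-token cross-entropy applied to the concatenated $(C, R)$ token sequence, with the prompt tokens masked out of the loss. Below is a minimal sketch of this masking logic, not the actual training code: it assumes a Hugging Face-style causal LM whose forward pass returns `.logits` and pre-tokenized 1-D id tensors with illustrative names (in the real model, the audio $A$ enters through an audio encoder rather than as token ids, and training uses the ms-swift framework described in Sec. 4.1):

```python
import torch
import torch.nn.functional as F

def cot_sft_loss(model, prompt_ids, cot_ids, response_ids):
    """Negative log-likelihood of (C, R) given the prompt, cf. Eq. (6).

    prompt_ids, cot_ids, response_ids: 1-D LongTensors of token ids
    (hypothetical pre-tokenized inputs; names are illustrative only).
    """
    # The target sequence is the concatenation of reasoning C and response R.
    input_ids = torch.cat([prompt_ids, cot_ids, response_ids], dim=-1)
    labels = input_ids.clone()
    # Mask the prompt so the loss covers only the C and R tokens.
    labels[: prompt_ids.size(-1)] = -100

    logits = model(input_ids.unsqueeze(0)).logits  # (1, T, vocab)
    # Standard causal shift: position t predicts token t + 1.
    return F.cross_entropy(logits[0, :-1], labels[1:], ignore_index=-100)
```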
This approach draws inspiration from recent advancements in symbolic reasoning and CoT training [Cui et al., 2024], which emphasize that zero-shot reasoning without training is less effective. Moreover, previous studies have shown that models tuned on native CoT data significantly outperform those trained on generic labels, especially in multimodal reasoning tasks [Guo et al., 2024, Wen et al., 2019]. + +Table 1: Domains and tasks of our constructed CoTA dataset. + +
| Domain | Task | Description |
| --- | --- | --- |
| Sound | Sound QA | Sound question answering |
| Speech | Speech QA | Speech question answering |
| Speech | SER | Speaker emotion recognition |
| Speech | S2TT | Speech-to-text translation |
| Music | Music QA | Music question answering |
# 3.2 Systematic Data Preparation for Audio Reasoning

Training the Audio-Reasoner model requires a high-quality, diverse, and multitask audio-based reasoning dataset. Our goal is to develop a scalable and effective data generation method that systematically transforms raw audio data and simple human-labeled annotations into structured reasoning tasks. The resulting CoTA dataset with 1.2 million samples, focusing on complex reasoning-based question-answering tasks, spans three domains—audio, speech, and music—as detailed in Table 1.

To achieve this, we design a structured data generation pipeline consisting of three key stages: (1) generating high-quality annotations and diverse questions, (2) constructing structured reasoning chains, and (3) performing comprehensive validation. The complete pipeline is illustrated in Figure 3. The following sections describe each stage in detail.

![](images/cd2008c2e248f8239147184f97d37d4637b0f8e288fd39a3f6af8d50411851e3.jpg)
Figure 3: Multistage data generation pipeline.

# 3.2.1 Multistage Data Generation Pipeline

Stage 1: Automated Annotation and Question-Answer Synthesis. We begin by using advanced external closed-source models to refine the existing simple human annotations into high-quality and logically coherent training data. While large language models can produce hallucinations in free-form generation, they are highly effective at structured, evidence-based content creation. We leverage this strength by instructing the model to sequentially describe the audio elements, enhancing its understanding of sound sources and speech context. Based on these enriched descriptions, the model generates a diverse set of questions, ranging from straightforward factual queries to complex logical reasoning problems. This process ensures that the dataset captures a broad spectrum of reasoning tasks, as summarized in Table 1. In Sec. A.1, we provide the prompt used to guide closed-source models in generating questions and corresponding answers.

Stage 2: Structured Reasoning Chain Construction. Next, we transform the generated question-answer pairs into structured reasoning chains. Given the limited development of CoT methodologies in the audio domain, we adopt a systematic approach to ensure inference stability. The model first plans and analyzes the questions, extracts key information from the captions, and formulates logical steps leading to the answer. To facilitate structured reasoning, we employ explicit step demarcations, allowing the model to autonomously construct multi-step inference pathways. Sec. A.2 describes the prompt used for the structured reasoning chain construction process.

Stage 3: Quality Assurance and Dataset Validation. Finally, we subject the generated data to a rigorous review process. Using the raw audio input, Stage 1 annotations, and Stage 2 reasoning chains, the model assesses whether the generated content is accurate, coherent, and suitable for inclusion in the final dataset. This step ensures the overall quality and reliability of the CoTA dataset. Sec. A.3 illustrates the prompt used for filtering low-quality content.

# 3.2.2 Task Taxonomy: Categories and Reasoning Methodologies

The CoTA dataset encompasses a range of reasoning-based tasks, each requiring a distinct reasoning path that the model should grasp.
These include:

(1) Sound-Based Question Answering: The model identifies and analyzes sound characteristics, contextualizing them within the user's query to derive a reasoned response. (2) Speech-Based Question Answering: The model recognizes speaker timbres, transcribes speech content, and incrementally processes the question to determine the appropriate answer. (3) Speech Emotion Recognition (SER) and Speech-to-Text Translation (S2TT): These specialized tasks require the model to integrate speech recognition with emotion analysis and language translation, forming a structured reasoning process. (4) Music-Based Question Answering: As music is highly abstract, the model first analyzes fundamental attributes such as tonality, tempo, and emotion before progressing to genre classification and deeper inferential reasoning based on the user's query. A comprehensive breakdown of the dataset's task categories and reasoning pathways is provided in Table 1.

Table 2: Composition of our CoTA Dataset. We use Google Gemini [Team et al., 2024] to build the reasoning ability in CoTA. Note that the Multi-Speaker and Complex Audio datasets are manually synthesized; details are given in Sec. B.
| Category | Dataset Source | Main Skills Learning | Model Used | Quantity | Percentage | Synthetic |
| --- | --- | --- | --- | --- | --- | --- |
| Speech | Multi-Speaker | Multi-speaker Speech QA | gemini-2.0-flash | 117.4k | 12.09% | Yes |
| Speech | MELD [Poria et al., 2019] | Speech Emotion QA | gemini-2.0-pro-exp | 29.2k | 3.01% | No |
| Speech | CoVoST2 [Wang et al., 2021] | Speech-to-Text Translation | gemini-2.0-flash | 224.6k | 23.13% | No |
| Speech | Mustard [Castro et al., 2019] | Sarcasm Detection | gemini-2.0-pro-exp | 1k | 0.10% | No |
| Music | MusicBench [Melechovsky et al., 2024] | Music QA | gemini-2.0-flash | 137.1k | 14.12% | No |
| Sound | AudioSet [Gemmeke et al., 2017] | Sound QA | gemini-2.0-flash | 315.2k | 32.46% | No |
| Sound | Clotho [Drossos et al., 2020] | Sound QA | gemini-2.0-pro-exp | 9.3k | 0.93% | No |
| Sound | AudioCaps [Kim et al., 2019] | Sound QA | gemini-2.0-flash | 117.5k | 12.10% | No |
| Sound | Complex Audio | Complex Audio QA | gemini-2.0-flash | 20k | 2.06% | Yes |
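Functionally, the three stages of Sec. 3.2.1 form a generate-then-filter loop around a closed-source model. The sketch below is only illustrative: the `llm.ask` client wrapper and the inline prompt texts are hypothetical stand-ins (the actual prompts are given in Secs. A.1–A.3), and real runs batch such calls over the source datasets of Table 2:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class CoTASample:
    audio_path: str
    question: str
    reasoning: str  # planning, caption extraction, and step-by-step inference
    answer: str

def build_sample(llm, audio_path: str, raw_label: str) -> Optional[CoTASample]:
    # Stage 1: refine the simple human label, then synthesize a QA pair.
    caption = llm.ask(audio_path, f"Describe each audio element in order. Label: {raw_label}")
    question = llm.ask(audio_path, f"Pose a reasoning question grounded in: {caption}")
    answer = llm.ask(audio_path, f"Answer using only the caption.\nCaption: {caption}\nQuestion: {question}")

    # Stage 2: convert the QA pair into an explicit multi-step reasoning chain.
    reasoning = llm.ask(
        audio_path,
        "Plan the solution, extract key facts from the caption, and reason "
        f"step by step to the answer.\nCaption: {caption}\nQ: {question}\nA: {answer}",
    )

    # Stage 3: review; keep the sample only if the checker model accepts it.
    verdict = llm.ask(
        audio_path,
        "Are this QA pair and reasoning accurate and coherent for the audio? "
        f"Answer yes or no.\n{question}\n{reasoning}\n{answer}",
    )
    return CoTASample(audio_path, question, reasoning, answer) if "yes" in verdict.lower() else None
```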
# 3.2.3 Conclusion and Next Steps

In summary, we have introduced a systematic data generation pipeline that ensures the creation of high-quality, structured reasoning data for the Audio-Reasoner model. Our approach involves enriching raw audio data with detailed annotations, generating diverse questions, constructing explicit reasoning chains, and implementing a comprehensive validation framework. The following section provides an in-depth analysis of the final CoTA dataset and its reasoning capabilities, with a detailed statistical overview presented in Table 2.

# 3.3 CoTA Dataset Analysis

To evaluate the quality and reasoning efficacy of the CoTA dataset, we analyze its design from two key perspectives: (1) comprehensive audio coverage, ensuring broad representation across real-world and synthetic scenarios, and (2) scalability of reasoning complexity, which aligns task difficulty with structured inference patterns. Together, these aspects address critical gaps in audio-language pretraining.

Comprehensive Audio Coverage. CoTA integrates three audio domains—speech (38.33%), music (14.12%), and environmental sounds (47.55%)—ensuring diverse and representative coverage of real-world auditory contexts. This multi-domain structure captures a wide spectrum of acoustic phenomena, ranging from conversational speech (e.g., speech-to-text translation tasks in CoVoST 2) to intricate musical structures (MusicBench) and fine-grained environmental sound analysis (e.g., AudioSet's rich descriptions of acoustic environments).

A distinctive feature of CoTA is its hybrid synthetic-real composition, where synthetic data (Multi-Speaker and Complex Audio, $14.15\%$ of total samples) is strategically incorporated to enhance complex reasoning tasks, such as multi-step logical inference in Complex Audio. Meanwhile, the majority of the dataset is derived from high-quality real-world sources (e.g., MELD for emotion recognition). By unifying tasks across 10 distinct categories, spanning from fundamental classification to advanced tasks like translation and irony detection, CoTA facilitates a hierarchical learning process—an aspect largely absent in traditional datasets constrained to simple labeling tasks.

Scalability of Reasoning Complexity. The word count distribution in the CoTA dataset highlights the model's capacity for long-chain reasoning. As shown in Figure 4, most responses fall between 300 and 500 words, allowing for nuanced reasoning and detailed logical steps, particularly in audio and music question answering. This extended length supports transparency in reasoning, ensuring a thorough exploration of complex ideas. For more demanding tasks, such as those in the Multi-Speaker dataset, responses can reach up to 1,500 words. This increase reflects the model's ability to systematically break down intricate problems, demonstrating adaptive reasoning in scenarios requiring a deep understanding of multiple interacting elements.
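The length statistics behind Figure 4 reduce to a simple bucketed word count over each sample's generated text. A small sketch, assuming samples are dicts with `reasoning` and `answer` fields (field names are hypothetical):

```python
from collections import Counter

def length_histogram(samples, bin_width=100):
    """Proportion of responses per word-count bucket, as plotted in Figure 4."""
    hist = Counter()
    for s in samples:
        n_words = len((s["reasoning"] + " " + s["answer"]).split())
        hist[(n_words // bin_width) * bin_width] += 1
    total = sum(hist.values())
    return {low: count / total for low, count in sorted(hist.items())}
```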
![](images/c632a41b5a7f412a8c682f3bc6d71109768d4b1c43941df87876b341f545f899.jpg)
![](images/0bd2aa1ff7c3048ba77b507121a9d0abf1df8f3b78910baab538684bec5b6143.jpg)
![](images/0b63d5816c7d0317a054224a41f1c67e98fb31529cf58d18f1df86ff86ef0e74.jpg)
![](images/fc8096f2ec175706b289de4b055035b882781920208ae3008150038fb4a696d5.jpg)
![](images/2de0a197ee7589cf5a822ea1b183f833691d28cda1c03b177d528955bc25806c.jpg)
![](images/dd3a7cdcf4bf1022e1ef3b362fc3dc39661fa746dfbd2244684f3c5d5d0bd44f.jpg)
![](images/d6eae35fa287899c4c41075663ba130ae8f203ee0f1b708b9020787659e4450b.jpg)
![](images/9116a5ec7b70fbbbb30739b8027d02e6fc477dfb33fb817358a70343cda2369e.jpg)
![](images/44be2c35e1b0d98bd8e74345eeddadcfbb53fdc71670efecae6cebf702ece18c.jpg)
Figure 4: The bar chart shows the data length distribution across nine CoTA sub-datasets, with intervals of 100 on the horizontal axis (0–1200) and proportions on the vertical axis (totaling $100\%$). A moving average trend line is overlaid.

Conversely, simpler tasks like S2TT elicit more concise responses, typically between 100 and 200 words. This brevity prevents unnecessary elaboration, maintaining clarity and efficiency when a straightforward answer suffices. By adjusting response length based on task complexity, the model showcases its dynamic reasoning capabilities—balancing depth and precision to effectively address a wide range of analytical challenges. Sec. C provides further evidence of CoTA's scalability of reasoning complexity.

# 4 Experiments

# 4.1 Experimental Setup

Training Details. Our model, Audio-Reasoner, is built on Qwen2-Audio-Instruct [Chu et al., 2024], comprising a total of 8.4 billion parameters. The training process was conducted using the ms-swift framework [Zhao et al., 2024b], employing supervised fine-tuning with full-parameter tuning. We used a maximum learning rate of 1e-5 and trained the model for a single epoch across the entire CoTA dataset.

Evaluation Metric. Existing evaluation datasets in the audio domain primarily focus on simple classification tasks or BLEU [Papineni et al., 2002] score-based assessments of model responses. These approaches, however, fail to comprehensively reflect the reasoning capabilities of LALMs. To address this, our evaluation methodology incorporates multiple diverse benchmarks. First, we assessed the model's accuracy on closed-form questions using the MMAU-mini subset [Sakshi et al., 2024]. This subset was specifically chosen since the model's training data does not include multiple-choice questions. Additionally, we evaluated its real-world conversational abilities using the chat and foundation sections of AIR-Bench [Yang et al., 2024b]. These benchmarks encompass various audio modalities, including sound, speech, and music. Beyond these, we also tested the model's performance on traditional audio-related tasks such as speech-to-text translation (S2TT) on CoVoST 2 [Wang et al., 2021] and speech emotion recognition (SER) on the MELD dataset [Poria et al., 2019]. A comprehensive summary of the evaluation tasks and datasets is presented in Table 3.

Baselines. We primarily select state-of-the-art large audio language models as the baselines for comparison. These include the closed-source models Gemini-1.5-pro [Team et al., 2024], GPT-4o [Hurst et al., 2024], and Qwen-audio-turbo [Chu et al., 2023], as well as the open-source models SALMONN [Tang et al., 2023], Qwen-Audio-Chat [Chu et al., 2023], and Qwen2-Audio-Instruct [Chu et al., 2024], which also serves as the base model.
Additionally, we compared cascade model approaches such as Whisper [Radford et al., 2023] + GPT-4 [Achiam et al., 2023] and a series of mainstream multimodal large language models [Gong et al., 2023b,a, Kong et al., 2024, Ghosh et al., 2024, Liu et al., 2024b, Su et al., 2023, Wu et al., 2024, Wang et al., 2023, Zhang et al., 2023a].

Table 3: Summary of evaluation benchmarks for Audio-Reasoner.
| Dataset | Split | Task | Metric |
| --- | --- | --- | --- |
| MMAU-mini | Sound | Sound QA | ACC |
| MMAU-mini | Speech | Speech QA | ACC |
| MMAU-mini | Music | Music QA | ACC |
| AIR-Bench | Chat-Sound | Sound QA | GPT-4 Eval |
| AIR-Bench | Chat-Speech | Speech QA | GPT-4 Eval |
| AIR-Bench | Chat-Music | Music QA | GPT-4 Eval |
| AIR-Bench | Chat-MixedAudio | Complex Audio QA | GPT-4 Eval |
| AIR-Bench | Foundation-SoundAQA | Sound QA | ACC |
| AIR-Bench | Foundation-SER | Speech QA | ACC |
| AIR-Bench | Foundation-SIC | Speaker Intent Classification | ACC |
| AIR-Bench | Foundation-SNV | Speaker Number Verification | ACC |
| AIR-Bench | Foundation-MusicAQA | Music QA | ACC |
| CoVoST 2 | Test | Speech-to-Text Translation (S2TT) | BLEU |
| MELD | Test | Speech Emotion Recognition (SER) | ACC |
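Of the metrics in Table 3, the two that can be computed locally are straightforward: choice accuracy for the closed-form QA splits and corpus BLEU for S2TT (the AIR-Bench chat scores are assigned by GPT-4 and are not reproduced here). A rough sketch using the sacrebleu package; the option-letter extraction is a simplified heuristic, not the official benchmark scorer:

```python
import re
import sacrebleu

def choice_accuracy(predictions, references):
    """ACC for closed-choice QA: compare extracted option letters (A-D)."""
    def letter(text):
        # Heuristic: take the last standalone option letter in the text.
        hits = re.findall(r"\(?\b([A-Da-d])\b\)?", text)
        return hits[-1].upper() if hits else None
    correct = sum(letter(p) == letter(r) for p, r in zip(predictions, references))
    return correct / len(references)

def s2tt_bleu(hypotheses, references):
    """Corpus-level BLEU for speech-to-text translation (CoVoST 2)."""
    return sacrebleu.corpus_bleu(hypotheses, [references]).score
```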
# 4.2 Main Results

To evaluate the effectiveness of Audio-Reasoner, we compare its performance against both closed-source and open-source baselines on the MMAU-mini, AIR-Bench chat/foundation, CoVoST 2 (ZN/EN subset), and MELD benchmarks. The results in Tables 4, 5, 6, 7 and 8 clearly demonstrate that Audio-Reasoner significantly outperforms existing models, setting a new state-of-the-art in audio reasoning tasks.

Table 4: Performance comparison on MMAU-mini. The $\{\mathbf{so},\mathbf{mu},\mathbf{sp}\}$ column indicates whether "sound", "music", and "speech" have been used in training.
| Model | Size | {so, mu, sp} | Sound | Music | Speech | Avg |
| --- | --- | --- | --- | --- | --- | --- |
| **Closed-Source** | | | | | | |
| gpt4o + caption | - | - | 63.36 | 60.77 | 53.15 | 57.30 |
| gemini-1.5-pro | - | - | 56.75 | 49.40 | 58.55 | 54.90 |
| **Open-Source** | | | | | | |
| LTU | 7B | Y Y N | 22.52 | 9.69 | 17.71 | 16.89 |
| LTU-AS | 7B | Y Y Y | 23.35 | 9.10 | 20.60 | 17.68 |
| Audio Flamingo - Chat | 2.2B | Y Y N | 23.42 | 15.26 | 11.41 | 16.69 |
| GAMA | 7B | Y Y N | 41.44 | 32.33 | 18.91 | 30.90 |
| GAMA-IT | 7B | Y Y N | 43.24 | 28.44 | 18.91 | 30.20 |
| MU-LLaMA | 7B | N Y N | 40.84 | 32.63 | 22.22 | 31.90 |
| SALMONN | 13B | Y Y Y | 41.00 | 34.80 | 25.50 | 33.70 |
| Qwen-audio-Chat | 8.4B | Y Y Y | 55.25 | 44.00 | 30.03 | 43.10 |
| Qwen2-Audio-Instruct | 8.4B | Y Y Y | 54.95 | 50.98 | 42.04 | 49.20 |
| **Ours** | | | | | | |
| Audio-Reasoner | 8.4B | Y Y Y | 60.06 | 64.30 | 60.70 | 61.71 |
Performance on MMAU-mini. MMAU-mini in Table 4 assesses multimodal audio understanding across three major domains: sound, music, and speech. We first make a comparison with closed-source models. Audio-Reasoner achieves the highest overall score (61.71%), outperforming GPT-4o (57.30%) and Gemini-1.5-Pro (54.90%). Music reasoning shows the most significant improvement (Audio-Reasoner: 64.30%, GPT-4o: 60.77%, Gemini-1.5-Pro: 49.40%). This indicates superior musical structure comprehension, enabled by CoTA's diverse music-based tasks. Speech-based reasoning is also notably strong (Audio-Reasoner: $60.70\%$, GPT-4o: $53.15\%$, Gemini-1.5-Pro: $58.55\%$). This validates CoTA's impact in training models for context-dependent spoken language understanding. In comparison with open-source models, Audio-Reasoner surpasses all of them, with the next-best, Qwen2-Audio-Instruct, trailing by 12.51 percentage points $(49.20\%)$. Across individual domains, Audio-Reasoner achieves $60.06\%$ in sound reasoning (beating Qwen2-Audio's $54.95\%$), $64.30\%$ in music (outperforming Qwen2-Audio's $50.98\%$), and $60.70\%$ in speech (exceeding Qwen2-Audio's $42.04\%$).

Table 5: Performance comparison on AIR-Bench chat benchmark.
| Model | Sound | Music | Speech | Mixed Audio | Average |
| --- | --- | --- | --- | --- | --- |
| **Closed-Source** | | | | | |
| Whisper+GPT4 | - | - | 7.54 | - | 7.54 |
| Qwen-Audio-Turbo | 6.59 | 5.98 | 7.04 | 5.77 | 6.34 |
| Gemini-1.5-pro | 5.49 | 5.06 | 6.97 | 5.27 | 5.70 |
| **Open-Source** | | | | | |
| Macaw-LLM | 1.01 | 0.91 | 0.97 | 1.00 | 1.01 |
| SpeechGPT | 0.95 | 0.95 | 1.57 | 1.14 | 1.15 |
| Next-gpt | 4.76 | 4.18 | 3.86 | 2.92 | 4.13 |
| Pandagpt | 5.46 | 5.06 | 3.58 | 2.93 | 4.25 |
| BLSP | 5.55 | 5.08 | 6.17 | 4.52 | 5.33 |
| Qwen-Audio | 6.95 | 5.52 | 6.47 | 5.38 | 6.08 |
| SALMONN | 6.28 | 5.95 | 6.16 | 6.08 | 6.11 |
| Qwen2-Audio-Instruct | 6.99 | 6.79 | 7.18 | 6.77 | 6.93 |
| **Ours** | | | | | |
| Audio-Reasoner | 7.68 | 8.05 | 8.19 | 6.65 | 7.94 |
Performance on AIR-Bench chat. (1) chat benchmark. AIR-Bench chat in Table 5 evaluates contextual and conversational reasoning across four audio types: sound, music, speech, and mixed audio. Regarding closed-source models, Audio-Reasoner achieves the highest overall score (7.94), outperforming Gemini-1.5-Pro (5.70) and Whisper+GPT-4 (7.54). It shows the most significant improvements in music (8.05) and speech (8.19). Additionally, its mixed audio reasoning score (6.65) demonstrates proficiency in handling multi-source audio tasks. As for comparison with open-source models, Audio-Reasoner sets a new benchmark, surpassing Qwen2-Audio (6.93) by 1.01 points. Across domains, it achieves 7.68 in sound (beating Qwen2-Audio's 6.99), 8.05 in music (exceeding Qwen2-Audio's 6.79), and 8.19 in speech (outperforming Qwen2-Audio's 7.18), showcasing balanced expertise.

Table 6: Performance comparison on AIR-Bench foundation benchmark.
| Model | AIR-Sound (SoundAQA) | AIR-Music (MusicAQA) | AIR-Speech (SER) | AIR-Speech (SIC) | AIR-Speech (SNV) | Average |
| --- | --- | --- | --- | --- | --- | --- |
| **Closed-Source** | | | | | | |
| whisper+GPT4 | - | - | 59.5 | 87.7 | 30.0 | 59.1 |
| Qwen-Audio-Turbo | 62.8 | 62.5 | 60.0 | 56.4 | 54.3 | 59.2 |
| **Open-Source** | | | | | | |
| NEXT-GPT | 18.8 | 47.1 | 25.7 | 25.6 | 25.4 | 28.5 |
| SpeechGPT | 33.9 | 31.3 | 37.6 | 45.8 | 32.6 | 36.2 |
| BLSP | 36.1 | 31.0 | 27.4 | 46.6 | 28.1 | 33.8 |
| PandaGPT | 48.7 | 50.7 | 26.0 | 28.5 | 43.2 | 39.4 |
| SALMONN | 28.4 | 54.6 | 29.9 | 36.7 | 34.3 | 36.8 |
| Qwen-Audio-Chat | 64.6 | 48.2 | 43.2 | 77.8 | 35.3 | 53.8 |
| **Ours** | | | | | | |
| Audio-Reasoner | 65.7 | 55.2 | 60.5 | 88.1 | 56.3 | 65.2 |
(2) foundation benchmark. AIR-Bench foundation in Table 6 evaluates fundamental audio understanding across three primary categories: sound, music, and speech, with speech further divided into three subdomains: Speech Emotion Recognition (SER), Speaker Intent Classification (SIC), and Speaker Number Verification (SNV). Audio-Reasoner achieves the highest overall score (65.2), outperforming both closed-source and open-source baselines. Compared to the strongest closed-source model, Qwen-Audio-Turbo (59.2), Audio-Reasoner leads by 6.0 points, demonstrating superior reasoning across all audio domains. Within the sound category, Audio-Reasoner attains 65.7, surpassing Qwen-Audio-Chat (64.6) and Qwen-Audio-Turbo (62.8), highlighting its strong ability in environmental and non-speech audio understanding. For music reasoning, Audio-Reasoner achieves 55.2, significantly outperforming Qwen-Audio-Chat (48.2) and all other open-source baselines, indicating better comprehension of musical structures and attributes. Regarding speech reasoning, Audio-Reasoner sets new state-of-the-art results across all subdomains. It attains 60.5 in SER (vs. Qwen-Audio-Turbo's 60.0), 88.1 in SIC (surpassing Whisper+GPT-4's 87.7), and 56.3 in SNV (exceeding Qwen-Audio-Turbo's 54.3). The substantial lead in SIC showcases its exceptional speaker intent understanding, benefiting from CoTA's step-by-step reasoning process.

Table 7: Performance comparison of the speech-to-text translation (S2TT) task on the CoVoST 2 dataset. We consider the mutual conversion between Chinese and English as training and evaluation data.
| Model | EN-ZN BLEU1 | EN-ZN BLEU2 | EN-ZN BLEU3 | EN-ZN BLEU4 | EN-ZN Avg | ZN-EN BLEU1 | ZN-EN BLEU2 | ZN-EN BLEU3 | ZN-EN BLEU4 | ZN-EN Avg | Avg |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **Closed-Source** | | | | | | | | | | | |
| Gemini-1.5-pro | 68.25 | 49.12 | 37.81 | 29.79 | 46.24 | 51.83 | 26.61 | 16.27 | 10.88 | 26.39 | 36.32 |
| **Open-Source** | | | | | | | | | | | |
| Qwen2-Audio-Instruct | 58.63 | 39.55 | 28.71 | 21.40 | 37.07 | 48.52 | 24.31 | 14.65 | 9.24 | 24.18 | 30.63 |
| **Ours** | | | | | | | | | | | |
| Audio-Reasoner | 72.89 | 54.17 | 42.46 | 33.95 | 50.87 | 56.50 | 29.99 | 18.37 | 11.62 | 29.13 | 40.00 |
Performance on CoVoST 2. The CoVoST 2 dataset in Table 7 evaluates speech-to-text translation, a fundamental task in cross-lingual speech understanding. The results demonstrate the strengths of Audio-Reasoner in both the English-to-Chinese (EN-ZN) and Chinese-to-English (ZN-EN) directions.

For EN-ZN translation, Audio-Reasoner outperforms both the closed-source Gemini-1.5-pro and the open-source Qwen2-Audio-Instruct. With an average BLEU score of 50.87, it surpasses Gemini-1.5-pro (46.24) by 4.63 points and Qwen2-Audio-Instruct (37.07) by a significant 13.80 points. Its BLEU-4 score of 33.95 highlights its ability to generate fluent, high-quality translations, even for complex sentence structures. In the ZN-EN direction, Audio-Reasoner again leads with an average BLEU score of 29.13, outperforming Gemini-1.5-pro (26.39) by 2.74 points and Qwen2-Audio-Instruct (24.18) by 4.95 points. Its BLEU-4 score of 11.62 reflects an enhanced ability to produce coherent and accurate translations, particularly for longer, more challenging sentences.

These results demonstrate that Audio-Reasoner excels at capturing cross-lingual semantic alignment, consistently outperforming existing models in speech-to-text translation.
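For readers re-deriving the numbers in Table 7, BLEU [Papineni et al., 2002] underlies all reported scores. The sketch below assumes "BLEU-n" means BLEU capped at n-gram order n, which is one common convention; the paper does not spell out its scorer, and the strings are placeholders rather than CoVoST 2 data.

```python
# Illustrative only: scoring S2TT hypotheses with BLEU-1..4 via sacrebleu,
# assuming "BLEU-n" = BLEU with maximum n-gram order n.
from sacrebleu.metrics import BLEU

hyps = ["the weather is nice today"]      # system translations (placeholders)
refs = [["the weather is good today"]]    # refs[i] is the i-th reference stream

scores = []
for n in range(1, 5):
    # use tokenize="zh" when the target language is Chinese (EN-ZN direction)
    bleu = BLEU(max_ngram_order=n)
    scores.append(bleu.corpus_score(hyps, refs).score)
    print(f"BLEU-{n}: {scores[-1]:.2f}")

# per-direction "Avg" column = mean of the four orders
print(f"Avg: {sum(scores) / len(scores):.2f}")
```

Under this reading, averaging the four orders reproduces the per-direction Avg columns in Table 7, and the final Avg column is the mean of the two directions.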
Table 8: Performance comparison of the speech emotion recognition (SER) task on the MELD dataset.

| Model | Unweighted ACC (%) |
| :-- | :--: |
| EmoBox | 31.5 |
| SALMONN | 39.2 |
| Qwen2-Audio-Instruct | 49.9 |
| Audio-Reasoner | 53.9 |
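Table 8's metric is unweighted accuracy. In SER work this usually denotes the macro average of per-class recall, so that majority classes such as MELD's neutral do not dominate, though plain accuracy is sometimes reported under the same name; the paper does not say which convention it follows, so the sketch below (with invented labels) computes both.

```python
# Hypothetical illustration of the MELD metric: "unweighted accuracy" is commonly
# the macro average of per-class recall; overall accuracy is shown for contrast.
from sklearn.metrics import accuracy_score, balanced_accuracy_score

y_true = ["neutral", "neutral", "neutral", "joy", "anger"]      # made-up gold labels
y_pred = ["neutral", "neutral", "neutral", "neutral", "anger"]  # made-up predictions

print("overall accuracy:   ", accuracy_score(y_true, y_pred))           # 4/5 = 0.80
print("unweighted accuracy:", balanced_accuracy_score(y_true, y_pred))  # (1+0+1)/3 = 0.67
```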
Performance on MELD. The MELD dataset in Table 8 evaluates speech emotion recognition, a crucial task for understanding affective states in spoken language. Audio-Reasoner achieves the highest unweighted accuracy of 53.9, outperforming all baseline models, including the previous state-of-the-art Qwen2-Audio-Instruct (49.9), by 4.0 percentage points. The results further validate that Audio-Reasoner excels in emotion perception and contextual audio understanding, solidifying its effectiveness in real-world speech emotion recognition tasks.

![](images/9322ca54bc2fde2f6f2c9939c1e41fb674388081d73e82a1630c156cc78330a8.jpg)
Figure 5: Example of Audio-Reasoner answering a music-based question.

User: Analyze the interplay between the guitar riff and the spoken commentary. How do they contribute to the overall impression or potential purpose of the piece?

Audio-Reasoner [planning]: The question asks about interplay and contribution. Analyze the music clip, focusing on: characterizing the guitar riff (style, rhythm, melodic features); characterizing the spoken commentary (tone, content, frequency); analyzing their relationship (explain, praise, interrupt); and inferring the overall impression and purpose (instruction, demonstration, etc.).

[caption]: The audio has a blues-rock guitar riff on a clean-toned electric guitar, at a moderate tempo in 4/4 time. There's male spoken commentary, informal and relaxed, suggesting instruction or an informal performance.

[reasoning]: Consider two scenarios.

**Scenario 1: Instructional context.** If the commentary explains aspects of the riff (e.g., "bend on third string", "E minor blues progression"), it's instructional: the guitar demonstrates, the commentary clarifies. If it gives positive reinforcement ("very cool riff, man"), it's a casual lesson or a performance with teaching.

**Scenario 2: Informal performance/demonstration.** If the commentary is minimal (reactions like "yeah", "alright"), it's an informal performance. The riff's simplicity and repetition suggest a demonstration for less-experienced musicians.

The audio leans towards scenario 1. "Very cool riff, man" implies a positive assessment for teaching or demonstration. The clean tone and moderate pace suggest instruction or demonstration, not a raw blues performance. Refining: an E minor blues with an E-B chord progression and a C# implies the commentary helps demonstrate their use.

[summary]: The blues-rock, moderate-tempo guitar riff is complemented by relaxed, informal spoken commentary. The interplay indicates a purpose of instruction or demonstration of the riff's construction. The positive comment supports an informal teaching or relaxed demonstration setting.

[response]: The interplay creates an impression of informal instruction or demonstration. The clean-toned guitar plays a moderate-tempo blues-rock riff, and the male voice gives relaxed commentary like "very cool riff, man". The riff's simplicity, moderate pace, and E minor blues elements suggest the purpose is to demonstrate the riff and how to play its features, not a formal performance or raw blues.

Why Audio-Reasoner Excels. While open-source models typically lag behind closed-source alternatives, Audio-Reasoner narrows this gap significantly. It outperforms Gemini-1.5-Pro in multiple categories and achieves competitive results against GPT-4o.
A key factor driving Audio-Reasoner's state-of-the-art performance is the CoTA dataset, which provides an unparalleled foundation for comprehensive, structured, and context-aware audio reasoning. The dataset contributes to Audio-Reasoner's success in two critical ways: (1) CoTA's comprehensive audio reasoning data. Unlike models trained on fragmented or task-specific datasets, Audio-Reasoner benefits from CoTA's extensive and balanced coverage of sound, speech, and music-based reasoning. This ensures robust adaptation across diverse audio tasks, enhancing its ability to generalize across multimodal benchmarks. (2) CoTA's strong logical and contextual inference. CoTA's emphasis on structured reasoning and contextual awareness enables Audio-Reasoner to outperform existing models in long-form reasoning (MMAU-mini) and conversational audio understanding (AIR-Bench chat). By leveraging deep logical inference and nuanced context retention, it surpasses prior approaches in handling complex speech interactions and multimodal reasoning.

These strengths position Audio-Reasoner as a breakthrough in open-source audio intelligence, setting a new benchmark for multimodal reasoning in speech comprehension, translation, and real-world audio interpretation.

# 4.3 Case Study

Here we show a case in Figure 5, demonstrating the audio-based reasoning capability of Audio-Reasoner. The system analyzes the interplay between a guitar riff and spoken commentary, systematically breaking down their characteristics and relationship. It identifies key musical features, assesses the commentary's tone and intent, and infers the overall purpose of the piece. By considering different scenarios, Audio-Reasoner determines that the interplay suggests an informal instructional or demonstrative context rather than a formal performance. This example highlights the model's ability to extract meaningful insights from audio, combining musical analysis with contextual interpretation.

For more remarkable examples, please refer to Sec. D.

# 5 Conclusion

In this work, we introduced Audio-Reasoner, a large audio language model (LALM) designed to advance deep reasoning in audio-based tasks. By leveraging inference scaling and structured chain-of-thought (CoT) reasoning, we demonstrated significant performance improvements across key benchmarks. Central to our approach is CoTA, a large-scale, high-quality dataset containing around 1.2 million structured reasoning samples, which we generated through a systematic pipeline of annotation refinement, question synthesis, and CoT generation.

Our experimental results highlight the effectiveness of structured reasoning in the audio domain, achieving state-of-the-art performance on MMAU-mini $(+25.04\%)$, CoVoST 2 $(+8.31\%)$, and MELD $(+8.01\%)$. These findings underscore the critical role of reasoning-rich datasets and inference scaling in multimodal learning, particularly for audio-based tasks where existing models struggle with complex reasoning.

Moving forward, we believe that structured CoT methodologies will play a crucial role in enhancing the reasoning capabilities of LALMs. Future research directions include extending Audio-Reasoner's capabilities to multi-turn dialogue reasoning, cross-modal knowledge integration, and real-world applications in speech-driven AI systems.
By publicly releasing our model, dataset, and code, we aim to foster further innovation and collaboration in the field of audio reasoning, paving the way for more advanced and interpretable LALMs.

# References

Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
Cameron B Browne, Edward Powley, Daniel Whitehouse, Simon M Lucas, Peter I Cowling, Philipp Rohlfshagen, Stephen Tavener, Diego Perez, Spyridon Samothrakis, and Simon Colton. A survey of monte carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in Games (T-CIAIG), 4(1):1-43, 2012.
Santiago Castro, Devamanyu Hazarika, Verónica Pérez-Rosas, Roger Zimmermann, Rada Mihalcea, and Soujanya Poria. Towards multimodal sarcasm detection. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 4619-4629, 2019.
Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023.
Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, et al. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024.
Yingqian Cui, Pengfei He, Xianfeng Tang, Qi He, Chen Luo, Jiliang Tang, and Yue Xing. A theoretical understanding of chain-of-thought: Coherent reasoning and error-aware demonstration. arXiv preprint arXiv:2410.16540, 2024.
Alexandre Defossez, Laurent Mazare, Manu Orsini, Amelie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024.
Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024.
Konstantinos Drossos, Samuel Lipping, and Tuomas Virtanen. Clotho: An audio captioning dataset. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 736-740, 2020.
Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. arXiv preprint arXiv:2412.10117, 2024.
Logan Engstrom, Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Firdaus Janoos, Larry Rudolph, and Aleksander Madry. Implementation matters in deep policy gradients: A case study on PPO and TRPO. arXiv preprint arXiv:2005.12729, 2020.
Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong Li Lee, and Wynne Hsu. Video-of-thought: step-by-step video reasoning from perception to cognition. In International Conference on Machine Learning (ICML), pages 13109-13125, 2024.
Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957, 2025.
Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017.
Sreyan Ghosh, Sonal Kumar, Ashish Seth, Chandra Kiran Reddy Evuru, Utkarsh Tyagi, S Sakshi, Oriol Nieto, Ramani Duraiswami, and Dinesh Manocha. Gama: A large audio-language model with advanced audio understanding and complex reasoning abilities. In Empirical Methods in Natural Language Processing (EMNLP), pages 6288-6313, 2024.
Yuan Gong, Alexander H Liu, Hongyin Luo, Leonid Karlinsky, and James Glass. Joint audio and speech understanding. In Automatic Speech Recognition and Understanding Workshop (ASRU), pages 1-8, 2023a.
Yuan Gong, Hongyin Luo, Alexander H Liu, Leonid Karlinsky, and James Glass. Listen, think, and understand. arXiv preprint arXiv:2305.10790, 2023b.
Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.
Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. Mammoth-vl: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237, 2024.
Songhao Han, Wei Huang, Hairong Shi, Le Zhuo, Xiu Su, Shifeng Zhang, Xu Zhou, Xiaojuan Qi, Yue Liao, and Si Liu. Videoespresso: A large-scale chain-of-thought dataset for fine-grained video reasoning via core frame selection. arXiv preprint arXiv:2411.14794, 2024.
Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.
Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.
Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.
Feihu Jin, Yifan Liu, and Ying Tan. Zero-shot chain-of-thought reasoning guided by evolutionary algorithms in large language models. arXiv preprint arXiv:2402.05376, 2024.
Chris Dongjoo Kim, Byeongchang Kim, Hyunmin Lee, and Gunhee Kim. Audiocaps: Generating captions for audios in the wild. In North American Chapter of the Association for Computational Linguistics (NAACL), pages 119-132, 2019.
Zhifeng Kong, Arushi Goel, Rohan Badlani, Wei Ping, Rafael Valle, and Bryan Catanzaro. Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities. In International Conference on Machine Learning (ICML), pages 25125-25148, 2024.
Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.
Shansong Liu, Atin Sakkeer Hussain, Chenshuo Sun, and Ying Shan. Music understanding llama: Advancing text-to-music generation with question answering and captioning. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 286-290, 2024b.
Ziyang Ma, Mingjie Chen, Hezhao Zhang, Zhisheng Zheng, Wenxi Chen, Xiquan Li, Jiaxin Ye, Xie Chen, and Thomas Hain. Emobox: Multilingual multi-corpus speech emotion recognition toolkit and benchmark. arXiv preprint arXiv:2406.07162, 2024.
Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen.
Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025.
Jan Melechovsky, Zixun Guo, Deepanway Ghosal, Navonil Majumder, Dorien Herremans, and Soujanya Poria. Mustango: Toward controllable text-to-music generation. In North American Chapter of the Association for Computational Linguistics (NAACL), pages 8286-8309, 2024.
Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.
Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5206-5210, 2015.
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 311-318, 2002.
Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. Meld: A multimodal multi-party dataset for emotion recognition in conversations. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 527-536, 2019.
Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, et al. Toolllm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023.
Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International Conference on Machine Learning (ICML), pages 28492-28518, 2023.
Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Advances in Neural Information Processing Systems (NeurIPS), pages 53728-53741, 2023.
S Sakshi, Utkarsh Tyagi, Sonal Kumar, Ashish Seth, Ramaneswaran Selvakumar, Oriol Nieto, Ramani Duraiswami, Sreyan Ghosh, and Dinesh Manocha. Mmau: A massive multi-task audio understanding and reasoning benchmark. In International Conference on Learning Representations (ICLR), 2024.
Hao Shao, Shengju Qian, Han Xiao, Guanglu Song, Zhuofan Zong, Letian Wang, Yu Liu, and Hongsheng Li. Visual cot: Unleashing chain-of-thought reasoning in multi-modal language models. arXiv preprint arXiv:2403.16999, 2024a.
Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024b.
Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024.
Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In Advances in Neural Information Processing Systems (NeurIPS), pages 29106-29141, 2024.
Yixuan Su, Tian Lan, Huayang Li, Jialu Xu, Yan Wang, and Deng Cai. Pandagpt: One model to instruction-follow them all.
In Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants (TLLM), pages 11-23, 2023.
Changli Tang, Wenyi Yu, Guangzhi Sun, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, and Chao Zhang. Salmonn: Towards generic hearing abilities for large language models. arXiv preprint arXiv:2310.13289, 2023.
Yunlong Tang, Gen Zhan, Li Yang, Yiting Liao, and Chenliang Xu. Cardiff: Video salient object ranking chain of thought reasoning for saliency prediction with diffusion. arXiv preprint arXiv:2408.12009, 2024.
Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.
Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.
Miles Turpin, Julian Michael, Ethan Perez, and Samuel Bowman. Language models don't always say what they think: Unfaithful explanations in chain-of-thought prompting. In Advances in Neural Information Processing Systems (NeurIPS), pages 74952-74965, 2023.
Changhan Wang, Anne Wu, Jiatao Gu, and Juan Pino. Covost 2 and massively multilingual speech translation. In Conference of the International Speech Communication Association (Interspeech), pages 2247-2251, 2021.
Chen Wang, Minpeng Liao, Zhongqiang Huang, Jinliang Lu, Junhong Wu, Yuchen Liu, Chengqing Zong, and Jiajun Zhang. Blsp: Bootstrapping language-speech pre-training via behavior alignment of continuation writing. arXiv preprint arXiv:2309.00916, 2023.
Yan Wang, Yawen Zeng, Jingsheng Zheng, Xiaofen Xing, Jin Xu, and Xiangmin Xu. Videocot: A video chain-of-thought dataset with active annotation tool. In Workshop on Advances in Language and Vision Research (ALVR), pages 92-101, 2024.
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 24824-24837, 2022.
Peng Wen, Teng-Gen Hu, Robert J Linhardt, Sen-Tai Liao, Hong Wu, and Yu-Xiao Zou. Mulberry: A review of bioactive compounds and advanced processing technology. Trends in Food Science & Technology, 83:138-158, 2019.
Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. In International Conference on Machine Learning (ICML), pages 53366-53397, 2024.
Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024a.
Zhifei Xie and Changqiao Wu. Mini-omni2: Towards open-source gpt-4o with vision, speech and duplex capabilities. arXiv preprint arXiv:2410.11190, 2024b.
Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024.
An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024a.
Qian Yang, Jin Xu, Wenrui Liu, Yunfei Chu, Ziyue Jiang, Xiaohuan Zhou, Yichong Leng, Yuanjun Lv, Zhou Zhao, Chang Zhou, et al.
Air-bench: Benchmarking large audio-language models via generative comprehension. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 1979-1998, 2024b.
Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 11809-11822, 2023.
Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. Token-level direct preference optimization. In International Conference on Machine Learning (ICML), pages 58348-58365, 2024.
Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. arXiv preprint arXiv:2305.11000, 2023a.
Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. In Empirical Methods in Natural Language Processing (EMNLP), pages 15757-15773, 2023b.
Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024a.
Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024b.
Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola. Automatic chain of thought prompting in large language models. arXiv preprint arXiv:2210.03493, 2022.
Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024a.
Yuze Zhao, Jintao Huang, Jinghan Hu, Xingjun Wang, Yunlin Mao, Daoze Zhang, Zeyinzi Jiang, Zhikai Wu, Baole Ai, Ang Wang, et al. Swift: a scalable lightweight infrastructure for fine-tuning. arXiv preprint arXiv:2408.05517, 2024b.
Qiji Zhou, Ruochen Zhou, Zike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. arXiv preprint arXiv:2405.13872, 2024.
Anni Zou, Zhuosheng Zhang, Hai Zhao, and Xiangru Tang. Generalizable chain-of-thought prompting in mixed-task scenarios with large language models. arXiv preprint arXiv:2310.06692, 2023.

# A Prompt Details

A universally applicable method for writing prompts involves three key components: a clear task definition, a structured example, and a precise format specification. Our prompt adheres to this methodology by first defining the task explicitly, outlining the need for detailed audio descriptions and progressively challenging questions. It then provides a structured example that demonstrates the expected output format, ensuring clarity and minimizing ambiguity. Lastly, it specifies the exact formatting rules using explicit delimiters, ensuring consistency in responses. This approach ensures efficiency by eliminating interpretative variance, allowing for precise and reproducible outputs. When drafting this prompt, we adhered to a structured approach to maximize clarity and effectiveness. The first-person perspective is used to emphasize our direct involvement in designing the task, ensuring the reader understands the rationale behind each structural choice.
The structure follows a logical progression: we begin by introducing the general method, transition into an explanation of how our prompt aligns with this method, and conclude by justifying the approach's efficiency. By maintaining an academic tone, we reinforce the credibility and rigor of our prompt-writing methodology. We list some of the prompts used for tasks at different stages below.

# A.1 Prompt of Stage 1 when Processing Data (Sample from AudioSet)

We are annotating some audio and designing some questions. You are an excellent audio analyst. Next, you will receive an audio and one absolutely correct but simple description. Your task is to first generate a more detailed, in-depth and absolutely correct new description based on the given description. Then, use this description to generate three open-ended or single-choice questions with four options, along with their answers. Please separate the different parts using the delimiters shown in the sample.

Here is a sample. Please strictly follow the format in the sample. The audio presents a sustained, high-frequency static noise, characteristic of a detuned or malfunctioning electronic device, likely a television or radio... Describe the characteristics of the static noise in the audio, and how these characteristics change over time. What...?

Here is the original description: *** label here ***.

Here is the audio.

Please design three questions that gradually become more challenging, starting from basic factual questions, but don't deviate from the content of the audio itself. If it's a single-choice question, please give four options like (a) one, (b) two, ..., and the answer should be analyzed and end with a format like "the answer is B."

# A.2 Prompt of Stage 2 when Processing Data (Sample from AudioSet)

We are now designing a system to generate structured audio-based chain-of-thought reasoning data. You will receive an audio clip, its textual description, as well as a question and its answer. Your task is to explore in more detail the thinking process from the question to the answer. Your chain of thought will be divided into two main parts, the think and response sections. In the think section, you need to go through four steps: planning, captioning, reasoning, and summarizing. The think section is invisible to the user. Therefore, in the response section, you need to draw on all the reasoning processes and results in the think section and provide a final reply to the question. Finally, your reply should strictly follow this format: a planning part (in this part, analyze the user's needs and how to complete the task; if the problem is complex, it should be done step by step), a caption part (in this part, conduct an overall analysis of the given audio input content, try to find all the parts related to the question, describe them, and ensure it is completely correct), a reasoning part (start reasoning towards the final answer here), a summary part (draw appropriate results based on the reasoning part here), and finally the response (give the final answer here, referring to the summary part). Please strictly follow the format of the sample.

Sample:

[planning] The user wants to understand the dynamic changes within the provided audio clip ....

[caption] The audio clip predominantly features static noise .... similar to that of a detuned television or a device failing to receive a signal.

[reasoning] 1. Identify changes in intensity (volume): The audio's static noise does not remain at a constant volume. There are noticeable increases and decreases in loudness throughout the clip. ....
is struggling to maintain a consistent output, adding to the impression of something malfunctioning or broken.

[summary] The static noise in the audio is highly dynamic .... leading to a sense of disorder and instability.

[response] The audio presents a static noise .... The overall effect is one of energetic chaos, preventing any possibility of calm or predictability.

Note that you have both the question and the answer because it is necessary to ensure the correctness of the chain of thought. However, in your response, you can only refer to the content of the question and the audio, and lead to the answer. You must absolutely not assume that you already know the answer. Please provide a detailed and flexible response with high-quality logic in both the caption and reasoning sections. If the reasoning part requires complex logic, you can even propose several different approaches and try them one by one.

Here is the original description: *** caption here ***.

The question is: *** question here ***.

The answer you can refer to: *** answer here ***.

Again, don't mention that you have the answer and the description, because they are only here to help you design the chain of thought but should not exist in the real-world scenario, either in the think or response sections.

# A.3 Prompt of Stage 3 when Processing Data (Sample from AudioSet)

We are data reviewers. Next, you will receive an audio clip, along with its description, questions, answers, and most importantly, the thought process for solving the problems. Please determine and analyze whether all of these elements are completely correct, and especially check whether there are any hallucinations in the thought process. Return a positive verdict if there are no issues, and a negative verdict if there are errors in the data.

Here is the description of the audio: *** caption here ***.

Here is the question: *** question here ***.

Here is the answer: *** answer here ***.

And here is the thought process: *** COT process here ***.

Please conduct a thorough judgment and analysis and provide the result in the specified format.
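Secs. A.1-A.3 together define a three-stage generate-then-verify loop. The sketch below is a hypothetical wiring of those prompts to a generic multimodal chat API; `call_model`, the naive output parsing, and the verdict check are assumptions for illustration, not the authors' released code.

```python
# Hypothetical sketch of the three-stage CoTA construction pipeline (Secs. A.1-A.3).
# `call_model` stands in for any closed-source multimodal chat API, and the
# delimiter parsing depends on the actual prompt format.

def call_model(prompt: str, audio_path: str) -> str:
    """Placeholder for a multimodal chat API call (prompt + audio in, text out)."""
    raise NotImplementedError

def build_cota_record(audio_path: str, simple_label: str,
                      stage1: str, stage2: str, stage3: str) -> dict | None:
    # Stage 1: refine the simple annotation into a detailed caption plus QA pairs.
    refined = call_model(stage1.replace("*** label here ***", simple_label), audio_path)
    caption, question, answer = refined.split("\n\n")[:3]  # naive split; real prompts use delimiters

    # Stage 2: expand (caption, question, answer) into a structured chain of thought
    # with planning / caption / reasoning / summary steps and a final response.
    cot = call_model(stage2.replace("*** caption here ***", caption)
                           .replace("*** question here ***", question)
                           .replace("*** answer here ***", answer), audio_path)

    # Stage 3: automatic review; discard samples flagged as hallucinated or wrong.
    verdict = call_model(stage3.replace("*** COT process here ***", cot), audio_path)
    if "error" in verdict.lower():
        return None
    return {"audio": audio_path, "question": question, "cot": cot}
```

Only samples that pass the Stage 3 review are kept, which is how the pipeline filters hallucinated chains of thought out of CoTA.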
# B Synthetic Data Generation Pipeline

# B.1 Synthetic Data Introduction

Multi-Speaker Dataset: To enhance the model's ability to comprehend complex, multi-turn conversations among multiple speakers, we constructed the Multi-Speaker dataset using text-to-speech (TTS) technology. The dataset generation process consists of three steps: (1) Conversation Text Generation: We utilized commercial models to generate diverse multi-speaker conversation texts covering a wide range of scenarios. (2) Speech Synthesis: Leveraging all available timbres from LibriSpeech [Panayotov et al., 2015] as prompts, we employed the CosyVoice2 [Du et al., 2024] framework to synthesize high-quality speech samples. (3) Dataset Assembly: The synthesized speech samples, featuring distinct timbres, were carefully combined to create a rich and diverse multi-speaker conversation dataset.

This approach ensures that the dataset effectively trains the model to navigate intricate speaker interactions in natural conversations.

Complex Audio Dataset: To strengthen the model's ability to process intricate audio compositions, we developed the Complex Audio dataset. The audio materials are sourced from AudioSet, AudioCaps, and our internal material library. The dataset consists of two primary types of audio data: (1) Temporally Complex Audio: Short audio clips were sequentially arranged, and the model is required to perform sorting or counting tasks based on their temporal order. (2) Composite Complex Audio: Multiple long-form audio tracks, such as engine sounds and musical instruments, were blended; the model was then tasked with extracting specific target sounds from the composite audio, enhancing its ability to analyze overlapping audio sources (see the construction sketch below).

This dataset ensures that the model is well-equipped to handle real-world audio challenges, such as distinguishing between multiple concurrent sounds and identifying temporal patterns.
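Both audio types reduce to two primitives, temporal concatenation and track mixing. A minimal sketch with numpy and soundfile follows; the file names are hypothetical, and the paper does not specify its actual tooling.

```python
# Minimal sketch of the two Complex Audio primitives: temporal concatenation and
# track mixing. File names are hypothetical; the paper's tooling is unspecified.
import numpy as np
import soundfile as sf

def load_mono(path: str, sr: int = 16000) -> np.ndarray:
    audio, file_sr = sf.read(path, dtype="float32")
    if audio.ndim > 1:                      # down-mix stereo to mono
        audio = audio.mean(axis=1)
    assert file_sr == sr, "resample upstream if sample rates differ"
    return audio

def temporally_complex(paths: list[str]) -> np.ndarray:
    # (1) Temporally complex audio: events in a fixed order for sorting/counting tasks.
    return np.concatenate([load_mono(p) for p in paths])

def composite_complex(paths: list[str]) -> np.ndarray:
    # (2) Composite complex audio: overlap tracks, pad to the longest, then mix.
    clips = [load_mono(p) for p in paths]
    mix = np.zeros(max(len(c) for c in clips), dtype="float32")
    for c in clips:
        mix[: len(c)] += c / len(clips)     # equal-weight mix to avoid clipping
    return mix

sf.write("temporal.wav", temporally_complex(["glass.wav", "cow.wav", "cat.wav", "chainsaw.wav"]), 16000)
sf.write("composite.wav", composite_complex(["engine.wav", "violin.wav"]), 16000)
```

The B.2 sample below (glass break, cow, cat, chainsaw) is exactly the kind of sequence the concatenation primitive produces.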
To better understand the tasks assigned to the Multi-Speaker and Complex Audio datasets, we provide examples in Sec. B.2 and Sec. B.3.

# B.2 Sample from Complex Audio Dataset

[planning] The question is about identifying the sounds in the audio and arranging them in the correct order. I will carefully listen to the audio multiple times, noting down the sounds as I recognize them. Then, I will compare the identified sound sequence with the provided answer options to choose the one that matches best.

[caption] Upon initial listening, I can discern sounds that seem like a shattering noise, an animal sound which I think sounds like a cow, a cat sound, and what sounds like some mechanical tools working, perhaps a chainsaw. I shall listen again to clarify the sounds and their order.

[reasoning] 1. After relistening, the first sound is definitely glass breaking.
2. The second sound is definitely a cow mooing.
3. The third sound is a cat meowing.
4. The fourth sound is a chainsaw.
5. I compare this sequence (glass break, cow, cat, chainsaw) with the options provided.
6. Option (a) matches the order I identified.

[summary] Based on the detailed listening and analysis, the correct order of sounds in the audio is: glass break, cow, cat, chainsaw. This matches option (a).

[response] The answer is a.

# B.3 Sample from Multi-Speaker Dataset

[planning] The user asks me to identify the number of speakers in the audio clip. To do this, I will listen carefully and try to distinguish unique voices. The conversation is between two hikers; I need to confirm this by identifying their individual lines and vocal characteristics. Finally, I will provide the answer based on the number of identified speakers.

[caption] The audio presents a conversation between two individuals hiking.

Speaker 1: This person starts by commenting on the intensity of the climb and expresses doubt about continuing without a break. Later, after a reminder about the ranger's warning, this speaker agrees to turn back. The vocal tone suggests exertion and a desire to reach the summit, eventually giving way to agreement. Phrases spoken: "Wow, this climb is intense. I am not sure I can go much further without a serious break," "Turn back. But we're so close to the summit. I really want to make it, you know," "Unpredictable mountain weather, okay. Okay, you are right."

Speaker 2: This person responds to the first speaker's concerns, suggesting they turn back due to the darkening sky and diminishing daylight. They emphasize safety and remind the other speaker about the ranger's warning. The vocal tone conveys concern and prioritizes safety. Phrases spoken: "I know right. I am starting to think we should turn back. That sky looks awfully dark, and we're losing daylight quickly," "I hear you, but safety first. Remember what the ranger said about the unpredictable mountain weather," "So, turn back."

Based on the clear distinction in vocal tones and the back-and-forth nature of the dialogue, I can confirm that there are two distinct speakers in this audio clip.

[reasoning] Initial assessment: The audio starts with two distinct voices in a hiking-related dialogue. The voices are clearly distinguishable from the get-go, with one having a deeper timbre and the other a higher-pitched one.

Dialogue analysis: A close look at the content shows a clear difference in the speakers' stances. One is eager to press on to the summit, saying, "We're almost there. Let's keep going." The other is more cautious, citing the darkening sky and fading daylight, stating, "The weather looks bad. We should turn back."

Vocal tone differentiation: Each speaker's vocal tone stays consistent. The eager speaker has an energetic and determined tone, with a rising pitch when advocating for continuing. The cautious speaker has a measured, worried tone, with a lower pitch when warning of danger.

Confirming speaker count: A review of the audio shows no extra voices or interruptions. The conversation moves smoothly between the two speakers, indicating only two are present.

Final answer: Based on this analysis, there are two individuals in the conversation. Thus, the answer is B.

[summary] Based on the dialogue and vocal tone analysis, there are two distinct speakers in the audio. The answer is B.

[response] Based on the dialogue and distinct vocal tones present in the audio, I have identified two individuals engaged in the conversation. Therefore, the answer is B.

# C Further Dataset Analysis

![](images/2c7cf6efc88d700632067417e48e5a03b2a49288065af2afae5640b0f1376a7a.jpg)
Figure 6: Detailed information of the CoTA dataset: the maximum, minimum, and average number of words in the answers, as well as the average length of the audio.

CoTA's reasoning adaptability is also evident in its distribution of token lengths and audio duration across tasks in Figure 6. The dataset accommodates a broad range of reasoning complexities, with per-task maximum token lengths spanning 327 (CoVoST 2) to 1,454 (Multi-Speaker), ensuring coverage of both concise and highly intricate reasoning processes. Notably, tasks requiring deep logical inference, such as complex audio, exhibit a well-balanced token distribution $(\max = 423, \text{avg} = 192.96)$, supporting structured multi-step reasoning without unnecessary redundancy.

Further, the dataset's average token lengths (164.48-481.57) align with task difficulty: longer reasoning chains characterize tasks such as sound description (AudioSet: 395.26) and music understanding (MusicBench: 463.89). Meanwhile, CoTA ensures practical generalization by maintaining audio durations between 2.84s and 26.34s, where shorter clips (e.g., MELD: 2.84s) support concise context-dependent reasoning, while extended sequences (Multi-Speaker: 26.34s) enable complex multi-turn inference. This systematic variation in reasoning depth and audio granularity ensures adaptability across diverse tasks, addressing the limitation of one-size-fits-all reasoning chains in existing audio datasets.
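Statistics like those in Figure 6 are straightforward to recompute from a CoTA-style JSONL dump. The sketch below assumes illustrative field names (`task`, `answer`, `duration`), which are not a documented schema of the release.

```python
# Sketch for recomputing Figure 6-style statistics from a CoTA-style JSONL dump.
# The field names ("task", "answer", "duration") are assumptions, not a documented schema.
import json
from collections import defaultdict

per_task = defaultdict(lambda: {"words": [], "secs": []})
with open("cota.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        per_task[rec["task"]]["words"].append(len(rec["answer"].split()))
        per_task[rec["task"]]["secs"].append(rec["duration"])

for task, s in sorted(per_task.items()):
    w, secs = s["words"], s["secs"]
    print(f"{task}: words max={max(w)} min={min(w)} avg={sum(w)/len(w):.2f}, "
          f"audio avg={sum(secs)/len(secs):.2f}s")
```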
# D More Case Studies

![](images/9b43c9b6c9f74919ef542d987cdc96e3d1e58021911f412304ed512d5557ae1d.jpg)
Figure 7: Example of Audio-Reasoner answering a music-based question.

User: Analyze the interplay between the vocal sample, synth elements, and percussion within the track. How do these elements contribute to the overall texture and dynamic progression of the piece, particularly focusing on the shift from the intro to the main rhythm?

Audio-Reasoner [planning]: The user is asking us to identify potential influences or subgenres within rock music based on the provided instrumental track's instrumentation, chord progression, and overall sonic character. I will start by analyzing the chord progression, instrumentation, and overall sonic character of the music track. Then I will use these analyses to deduce potential influences and subgenres.

[caption]: The music features a rock-oriented instrumental piece. The lead melody is carried by an electric guitar, featuring a slight overdrive or distortion. Acoustic rhythm guitars provide harmonic support, panned to the left and right. A slap-style bass guitar and acoustic drums form the rhythm section. The song follows a medium tempo in 4/4 time, with a chord progression of Bb, Ab6, F, and Ab6.

[reasoning]: The chord progression, Bb, Ab6, F, Ab6, has blues-rock similarities because it includes flat-7 chords. Electric guitar with overdrive or distortion, a driving rhythm section (bass and drums), and acoustic rhythm guitars are characteristics of rock music. Slap bass is used in funk music. Given these considerations, several potential influences and subgenres present themselves. The blues and rock elements of the chord progression could indicate the influence of blues rock and hard rock, which often incorporate blues-based progressions with louder, more aggressive instrumentation. The slap bassline suggests a funk influence, which can be found in funk rock and other rock subgenres that incorporate funk elements. The overall sonic character is most indicative of rock, with potential cross-pollination from blues and funk.

[summary]: Based on the chord progression of Bb, Ab6, F, Ab6, the prominent electric guitar solo with overdrive, the acoustic rhythm guitars, the slap-style bass, and the acoustic drums, the music has potential influences or subgenres within rock music. These may include blues rock, hard rock, and funk rock.

[response]: Considering the instrumentation, the chord progression (Bb, Ab6, F, Ab6), and the overall sonic character of the piece, this instrumental track may draw upon influences from several subgenres within rock music. The chord progression has blues and rock similarities, pointing towards blues-rock and hard-rock elements. Moreover, the slap bassline indicates a touch of funk.

![](images/aff642ef84ae66fc0319f623312be2998e8ccbd4665d511a6391ba6638e6705a.jpg)
Figure 8: Example of Audio-Reasoner answering a sound-based question.

User: Considering the sounds present in the audio, describe the likely environment in which the dog is located and detail the types of interactions or activities the dog might be engaged in.

Audio-Reasoner [planning]: The question asks about the dog's environment and activities based on the audio. I need to carefully analyze the audio to identify relevant sounds, infer the setting, and deduce potential interactions.

[caption]: The audio contains dog barks of varying intensity and pitch. There are also sounds suggesting movement, such as rustling or the distinct sounds of the dog moving on what sounds to be hard floor surfaces.
These movement sounds occur both during and between the barking, with the barks themselves being the main focus of sound. The pitch of the bark changes at different points, from higher to lower and back again.

[reasoning]: 1. **Environment inference:** The presence of hard-floor-surface sounds, coupled with the absence of specific outdoor environmental noises (e.g., wind, traffic, birdsong), points towards an indoor location. The changing pitch and tone of the barks show that the arousal level is rapidly changing.
2. **Activity inference:** The barking, interspersed with the sound of movement, indicates the dog is active. The varied bark intensity could suggest excitement, playfulness, or even alert behavior in relation to other sounds.
3. **Interaction possibilities:** Based on the sounds alone, it's difficult to pinpoint the exact interaction. It could be playing with a toy, reacting to someone entering the room, or engaging in a game.

[summary]: The dog is likely indoors on a hard floor surface. The audio suggests the dog is actively moving and barking, possibly engaged in play or reacting to something in its immediate surroundings. The dog is probably reacting to a stimulus within its environment, resulting in a change in the dog's arousal and in the tone and pitch of its barks.

[response]: Based on the audio, the dog is likely located in an indoor environment with hard floor surfaces. The varied intensity and sound of the dog's barks, combined with the sound of its movement, indicate the dog is actively moving and vocalizing, and that its arousal level is actively changing. Given this information, the dog might be engaged in some activity or play, perhaps reacting to a toy or another person within the room.

![](images/e1fbcf9ad869804d415d3768a1bafdb4f20a43d182c0fe8e203fecf5c5920fde.jpg)
Figure 9: Example of Audio-Reasoner answering a multi-speaker speech-based question.

User: What did the second person say about the rose bush?
(a) They like it.
(b) It's the bane of their existence.
(c) It needs more fertilizer.
(d) It provides good shade.

Audio-Reasoner [planning]: The question asks about what the second person said about the rose bush. The audio presents a conversation between two neighbors, Amelia and Ben. I need to identify who the second speaker is and what they said about the rose bush. Then, I need to match what the second speaker said with the given options.

[caption]: The audio presents a conversation between Amelia and Ben about the upcoming block party and a troublesome rose bush.

**Speaker 1 (Amelia)**: Asks Ben if he's attending the block party and mentions the rose bush causing him trouble. She says "You mentioned weedies, that rose bush still causing you trouble, the one that keeps growing over the fence?". She also mentions she has been scratched by it.

**Speaker 2 (Ben)**: Confirms he's attending the block party, entering his chili in the cook-off, and complains about the rose bush. He says "That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble." and suggests chopping it down.

Therefore, Ben is the second speaker.

[reasoning]: 1. **Identify the second speaker:** Based on the audio, Ben is the second speaker.
2. **Identify what Ben said about the rose bush:** Ben says "That thorny rose bush is the bane of my existence.
It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble." and suggests chopping it down.
3. **Match Ben's statement with the options:**
- (a) They like it. This is incorrect because Ben clearly dislikes the rose bush.
- (b) It's the bane of their existence. This aligns perfectly with Ben's statement.
- (c) It needs more fertilizer. This is not mentioned in the audio.
- (d) It provides good shade. This is also not mentioned in the audio.

Therefore, the correct answer is (b).

[summary]: Based on the audio, the second speaker, Ben, describes the rose bush as "the bane of my existence" and complains about it scratching him. Therefore, the answer is (b).

[response]: The second person, Ben, explicitly states that the rose bush "is the bane of my existence". Therefore, the answer is B.

![](images/02e9abd6638b2b63d0c5724c13130da9bc69a42b07a4d1726ec78a20919f111b.jpg)
Figure 10: Example of Audio-Reasoner answering a complex-audio-based question.

User: Given that you hear buzzing plastic, identify the background sound.
(a) A washing machine running
(b) Rain falling on a window
(c) A toilet flushing
(d) Waves crashing on a shore

Audio-Reasoner [planning]: I will carefully and methodically approach the task of identifying the background sound in the audio clip. This will allow me to focus solely on the characteristics of the background noise. Once the background sound is isolated, I will analyze it in detail to determine its key qualities, such as its consistency, rhythm, and texture. Afterward, I will compare these characteristics to the provided answer options.

[caption]: Upon my initial listening, I observed that the background sound possesses characteristics that suggest fluid movement. There is a continuous, almost rhythmic, sound with subtle gurgling and sloshing. This could indicate the presence of water or some other flowing liquid. As I continue to listen more carefully, the flow becomes even more evident, suggesting a consistent rush or stream-like sound.

[reasoning]: 1. **Sound characteristics:** My first observation is that the background sound features a continuous, almost rhythmic quality, with faint gurgling noises. These characteristics often point to a fluid in motion. Upon further listening, I recognize that the sound might be created by a liquid flowing from one place to another.
2. **Detailed listening:** I focus on the quieter sections of the sound, where the fluid dynamics become clearer. The consistent, almost rushing quality suggests the movement of water rather than irregular drops or splashes.
3. **Comparison to options:** With this in mind, I now compare the sound characteristics to the provided answer options. Option (c), "A toilet flushing," often involves a steady rush of water with gurgling and sloshing as the water flows through pipes; this closely matches the consistent flow and gurgling noises I hear in the background.
4. **Conclusion:** Based on my analysis, the sound best aligns with the description of a toilet flushing, due to the nature of the consistent, rushing water and subtle gurgling sounds.

[summary]: After listening to the audio carefully, isolating the background sound, and comparing it to the answer options, I concluded that the background sound most closely matches the sound of a toilet flushing.
The key factors leading to this conclusion include the continuous rush of water and the gurgling quality, which are typical of a flushing toilet.
], + "spans": [ + { + "bbox": [ + 213, + 213, + 397, + 226 + ], + "type": "text", + "content": "†Equal Contributions ‡Corresponding Authors" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 151, + 226, + 459, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 226, + 459, + 236 + ], + "spans": [ + { + "bbox": [ + 151, + 226, + 459, + 236 + ], + "type": "text", + "content": "zhifei001@e.ntu.edu.sg linmb001@outlook.com liuzihang99@gmail.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 165, + 238, + 444, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 238, + 444, + 247 + ], + "spans": [ + { + "bbox": [ + 165, + 238, + 444, + 247 + ], + "type": "text", + "content": "pengchengwu@ntu.edu.sg yansc@nus.edu.sg ascymiao@ntu.edu.sg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 180, + 248, + 430, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 248, + 430, + 258 + ], + "spans": [ + { + "bbox": [ + 180, + 248, + 430, + 258 + ], + "type": "text", + "content": "Project: https://github.com/xzf-thu/Audio-Reasoner" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 281, + 286, + 329, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 286, + 329, + 298 + ], + "spans": [ + { + "bbox": [ + 281, + 286, + 329, + 298 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "spans": [ + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "text", + "content": "Recent advancements in multimodal reasoning have largely overlooked the audio modality. We introduce Audio-Reasoner, a large-scale audio language model for deep reasoning in audio tasks. We curate a diverse collection of multi-task audio datasets with simple annotations, refining them through structured secondary labeling and complex question generation. Additionally, We utilize advanced closed-source models to generate structured reasoning chains, transforming raw annotations into a formatted inference process. Following inference scaling principles, we train Audio-Reasoner on CoTA, a high-quality reasoning dataset with 1.2 million reasoning-rich samples. Experiments show state-of-the-art performance across key benchmarks, including MMAU-mini " + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "inline_equation", + "content": "(+25.42\\%)" + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "text", + "content": ", AIR-Bench chat/foundation " + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "inline_equation", + "content": "(+14.57\\% / + 10.13\\%)" + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "text", + "content": ", and MELD " + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "inline_equation", + "content": "(+8.01\\%)" + }, + { + "bbox": [ + 140, + 308, + 470, + 441 + ], + "type": "text", + "content": ". Our findings stress the core of structured CoT training in advancing audio reasoning." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 116, + 460, + 496, + 661 + ], + "blocks": [ + { + "bbox": [ + 116, + 460, + 496, + 661 + ], + "lines": [ + { + "bbox": [ + 116, + 460, + 496, + 661 + ], + "spans": [ + { + "bbox": [ + 116, + 460, + 496, + 661 + ], + "type": "image", + "image_path": "6b8d76aec6a568542a810d60105df57d132003453be82af5476ad0dd56265004.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "lines": [ + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 671, + 504, + 706 + ], + "type": "text", + "content": "Figure 1: Benchmark performance of Audio-Reasoner on tasks of speech emotion recognition (SER), speech-to-text translation (S2TT), MMAU, and AIR-Bench chat. S2TT is measured using BLEU while SER and MMAU are measured ACC (left axis). AIR-Bench is scored by GPT (right axis)." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 220, + 37, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 220, + 37, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 220, + 37, + 568 + ], + "type": "text", + "content": "arXiv:2503.02318v2 [cs.SD] 20 Sep 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 731, + 168, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 168, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 168, + 742 + ], + "type": "text", + "content": "Technical report." + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 90, + 506, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 90, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 90, + 506, + 244 + ], + "type": "text", + "content": "Recent advancements in large language models (LLMs) have significantly enhanced their reasoning capabilities, driven by innovations like chain-of-thought (CoT) and inference scaling. Models such as OpenAI's o1 [Jaech et al., 2024] and Deepseek-R1 [Guo et al., 2025] have demonstrated significant improvements, especially in tasks like mathematics and coding [Team et al., 2025, Zhao et al., 2024a, Muennighoff et al., 2025, Liu et al., 2024a, Zhang et al., 2024b, Deng et al., 2024, Hui et al., 2024, Yang et al., 2024a]. These models have set a precedent for achieving \"deep thinking\" by tackling complex, structured tasks. The CoT framework, in particular, has been successfully applied to multimodal models, improving their cognitive abilities in image and video reasoning. Models such as Visual-CoT [Shao et al., 2024a], LLaVA-Reasoner [Zhang et al., 2024a], and MAmmoTH-VL [Guo et al., 2024] have demonstrated that large-scale datasets and multidimensional reasoning can enhance model performance, even in tasks involving images and videos [Zou et al., 2023]. 
Other works, like Mulberry [Wen et al., 2019] and Image-of-Thought [Zhou et al., 2024], integrate reflective and image-editing tools to further refine multimodal reasoning, offering new pathways for understanding complex queries across multiple modalities." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 249, + 506, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 506, + 382 + ], + "type": "text", + "content": "However, the application of CoT in the audio modality has been largely underexplored. Although models like Audio Flamingo [Kong et al., 2024], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024] have pushed the boundaries of large audio language models (LALMs), these advancements have not yet incorporated CoT reasoning at scale. Recent research [Ma et al., 2025] suggests that existing CoT methods, such as zero-shot reasoning in audio tasks, fail to significantly improve performance on more complex queries. This limitation is largely attributed to the simplicity of existing audio datasets—such as AudioSet [Gemmeke et al., 2017], AudioCaps [Kim et al., 2019], and Clotho [Drossos et al., 2020]—which predominantly feature short, simple labels. These simplified datasets hinder the development of LALMs capable of more intricate reasoning. Without richer, more complex data, these models struggle with long-form reasoning, and the application of CoT often leads to severe hallucinations and degraded performance. Therefore, advancing CoT in LALMs necessitates overcoming these dataset limitations to allow for more effective, deep reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 387, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 506, + 540 + ], + "type": "text", + "content": "To address the challenges in audio-based reasoning, we propose a scalable and effective approach to generating high-quality pretraining data. Using state-of-the-art commercial models, we introduce CoTA, a large-scale dataset containing 1.2 million refined captions and question-answer (QA) pairs. CoTA spans multiple datasets and tasks, undergoing rigorous filtering to ensure diversity and quality. Building on CoTA, we develop Audio-Reasoner, a large audio language model designed for long-context reasoning. Audio-Reasoner is trained with a 4K token context window and generates structured CoT reasoning with lengths that can exceed 1K tokens in real-world tasks. The model is fine-tuned on CoTA using supervised fine-tuning, adhering to a structured reasoning framework, as illustrated in Figure 2: (1) Planning—Identifies key problem components from the user query and formulates a structured sequence of reasoning steps essential for deriving an answer. (2) Caption—Extracts and integrates relevant multimodal content from the input to enrich the reasoning process. (3) Reasoning—Executes a systematic, step-by-step reasoning procedure to ensure logical coherence and accuracy. (4) Summary—Synthesizes the reasoning process into a final response that is concise, contextually grounded, and precise."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": "Our experimental results, partially presented in Figure 1, demonstrate the effectiveness of Audio-Reasoner. More comprehensively, we evaluate the model across multiple benchmarks: MMAU-mini [Sakshi et al., 2024]: A dataset with 1,500 closed-choice questions testing reasoning across sound, speech, and music. AIR-Bench [Yang et al., 2024b]: Various types of audio signals including human speech, natural sounds, and music. CoVoST2(zh-en) [Wang et al., 2021]: Speech-to-text translation in Chinese and English. MELD [Poria et al., 2019]: Emotion classification. Compared to Qwen2-Audio-Instruct [Chu et al., 2024], Audio-Reasoner achieves: " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+25.4\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": " improvement on MMAU-mini with reasoning subtask gains: " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+44.4\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+26.1\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+9.3\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+14.6\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": " gains on AIR-Bench chat; " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+30.6\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": " on CoVoST2(ZN/EN subset, Average BLEU score.); " + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "inline_equation", + "content": "+8.01\\%" + }, + { + "bbox": [ + 104, + 545, + 506, + 666 + ], + "type": "text", + "content": " on MELD. These results validate the effectiveness of our approach in advancing long-context reasoning and inference scaling for audio models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 672, + 350, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 672, + 350, + 684 + ], + "spans": [ + { + "bbox": [ + 105, + 672, + 350, + 684 + ], + "type": "text", + "content": "The major contributions we have made in this paper include:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 504, + 723 + ], + "type": "text", + "content": "- We propose Audio-Reasoner, designed for deep reasoning and inference scaling in the audio modality. Built upon Qwen2-Audio and fine-tuned with structured CoT training, Audio-Reasoner significantly improves long-context reasoning across diverse audio tasks." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 177 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "- We develop CoTA, a large-scale dataset with 1.2 million high-quality captions and QA pairs, spanning multiple audio domains. The dataset enables structured reasoning and enhances audio-language pretraining." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 114, + 504, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 114, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 114, + 504, + 148 + ], + "type": "text", + "content": "- We introduce a scalable data generation pipeline leveraging advanced commercial models to produce complex reasoning-based QA pairs and structured CoT annotations, enriching model training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 177 + ], + "type": "text", + "content": "- We achieve state-of-the-art performance, with " + }, + { + "bbox": [ + 104, + 154, + 506, + 177 + ], + "type": "inline_equation", + "content": "+25.4\\%" + }, + { + "bbox": [ + 104, + 154, + 506, + 177 + ], + "type": "text", + "content": " gains over Qwen2-Audio-Instruct on MMAU-mini, along with significant improvements in reasoning, translation, and emotion recognition tasks." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 190, + 197, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 197, + 202 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 197, + 202 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 212, + 506, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 212, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 506, + 354 + ], + "type": "text", + "content": "Chain-of-Thought Reasoning. LLMs leverage in-context learning (ICL) to enhance their reasoning capabilities by processing prompts and context. This is further strengthened through CoT techniques. Various CoT methods have been explored, including Tree of Thoughts (TOT) [Yao et al., 2023], manual few-shot CoT [Wei et al., 2022], and automatically generated approaches [Jin et al., 2024, Zhang et al., 2022]. In addition, studies have delved into the necessity of CoT, its theoretical foundations, and its effectiveness across a wide range of tasks [Sprague et al., 2024, Stechly et al., 2024, Turpin et al., 2023]. 
The release of OpenAI's o1 model [Jaech et al., 2024] has sparked renewed interest in CoT research, significantly boosting the capabilities of LLMs, especially in multi-step reasoning tasks such as coding [Zhang et al., 2024b] and mathematics [Yang et al., 2024a], setting new performance benchmarks. CoT techniques have been integrated with other methods such as Monte Carlo Tree Search [Browne et al., 2012], reflection [Guo et al., 2025], and tool use [Qin et al., 2023], and are often trained alongside reinforcement learning approaches [Rafailov et al., 2023, Zeng et al., 2024, Engstrom et al., 2020, Shao et al., 2024b]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 361, + 506, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 483 + ], + "type": "text", + "content": "Multimodal Chain-of-Thought. CoT techniques have also been explored in the realm of multimodal large models. For example, Visual-CoT [Shao et al., 2024a] incorporates object detection to aid in reasoning, while LLaVA-Reasoner [Zhang et al., 2024a] uses closed-source models for CoT fine-tuning through recaptioning. LLaVA-CoT [Xu et al., 2024] and MAmmoTH-VL [Guo et al., 2024] scale datasets to improve model performance. Other models, such as Mulberry [Wen et al., 2019], explore the application of reflective thinking, and Image-of-Thought [Zhou et al., 2024] integrates image editing tools to enhance reasoning. Video-related studies [Wang et al., 2024, Han et al., 2024, Fei et al., 2024, Tang et al., 2024] have demonstrated the effectiveness of CoT in reasoning tasks within the video domain. However, the application of CoT in the audio domain is still in its infancy. The study Audio-CoT [Ma et al., 2025] shows some improvement with zero-shot CoT in audio tasks, but it falls short on more complex problems. This paper aims to explore this gap further." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 491, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 612 + ], + "type": "text", + "content": "Large Audio Language Models. LALMs can be broadly categorized into two areas: audio understanding and real-time dialogue. Audio understanding models typically consist of a three-layer architecture—an encoder, connector, and an LLM—focusing on specific domains, as seen in models like Mu-LLaMA [Liu et al., 2024b], LTU [Gong et al., 2023b], EmoBox [Ma et al., 2024], and GAMA [Ghosh et al., 2024]. Other models, such as LTU-AS [Gong et al., 2023a], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024], employ unified architectures designed for multi-task training. Real-time conversation models, which focus on speech input and extend transformers to real-time speech synthesis, are also gaining popularity [Zhang et al., 2023b, Xie and Wu, 2024a,b, Fu et al., 2025, Defossez et al., 2024]. However, despite their focus on understanding and rapid response, current LALMs still lack significant exploration into reasoning tasks, such as CoT. This paper addresses this gap by investigating the application of CoT in LALMs."
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 624, + 209, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 209, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 209, + 636 + ], + "type": "text", + "content": "3 Audio-Reasoner" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "content": "In this section, we present the training methodology for our Audio-Reasoner model, designed to enable LALMs with structured, step-by-step reasoning in multimodal tasks. Unlike standard models, Audio-Reasoner generates both a chain-of-thought (CoT) and final response, ensuring logical consistency and interpretability. We describe the training framework, which includes a structured CoT approach, a novel data generation pipeline, and the large-scale CoTA dataset. This dataset is carefully constructed through automated annotation, reasoning chain creation, and validation, setting the foundation for robust and explainable audio-based reasoning." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 71, + 504, + 293 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 504, + 293 + ], + "lines": [ + { + "bbox": [ + 106, + 71, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 504, + 293 + ], + "type": "image", + "image_path": "43b5b9c5fc69a44f947aab314c7528399913cc4f40054888654c56601c09c66b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 302, + 506, + 348 + ], + "lines": [ + { + "bbox": [ + 104, + 302, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 506, + 348 + ], + "type": "text", + "content": "Figure 2: Comparison between Audio-Reasoner and Qwen2-Audio-Instruct: While Qwen2-Audio-Instruct produces brief and error-prone reasoning, our Audio-Reasoner uses a structured reasoning framework with distinct \"thinking\" and \"response\" phases, ensuring a more reliable and well-supported output through careful planning, information extraction, and step-by-step reasoning." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 369, + 293, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 293, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 293, + 380 + ], + "type": "text", + "content": "3.1 Model Training with Audio Reasoning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "type": "text", + "content": "A standard large language model is trained to generate an output sequence " + }, + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "type": "text", + "content": " given an input sequence " + }, + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 105, + 384, + 504, + 407 + ], + "type": "text", + "content": ". The probability distribution of the model's output is formulated as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 260, + 414, + 505, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 414, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 260, + 414, + 505, + 427 + ], + "type": "interline_equation", + "content": "P (Y \\mid X; \\theta) = f _ {\\theta} (X), \\tag {1}", + "image_path": "bcee43ed17dec3634a4272e89c7d65e02c05defeb37129e3cd28b403c88c188d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "type": "text", + "content": " is a Transformer-based model parameterized by " + }, + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 434, + 504, + 456 + ], + "type": "text", + "content": ". The training objective follows a maximum likelihood estimation framework:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 241, + 462, + 505, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 462, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 241, + 462, + 505, + 495 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\theta) = - \\sum_ {i = 1} ^ {N} \\log P \\left(Y _ {i} \\mid X _ {i}; \\theta\\right). 
\\tag {2}", + "image_path": "a15e2c4c57738991c852f7620561e0657f371e891841162d89e0f33095bb0a07.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "type": "text", + "content": "In our Audio-Reasoner, the input consists of an audio signal " + }, + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "type": "text", + "content": " and a text-based query " + }, + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 509, + 504, + 531 + ], + "type": "text", + "content": ", forming the multimodal input representation:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 277, + 533, + 505, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 533, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 277, + 533, + 505, + 546 + ], + "type": "interline_equation", + "content": "X = (A, Q). \\tag {3}", + "image_path": "57276f2f5e14f3300c084c9f3c02a4fa47bf07f7febddea0828ad262d0b3bf8c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "text", + "content": "Unlike conventional LLMs, where the output is a single response, we structure the model's output into two distinct components: the chain of thought reasoning " + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "text", + "content": ", which captures the step-by-step logical process, and the final response " + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "text", + "content": ", which provides the ultimate answer. The model thus learns to generate the concatenation of " + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 557, + 504, + 603 + ], + "type": "text", + "content": ", leading to the probability distribution:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 243, + 609, + 505, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 609, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 243, + 609, + 505, + 622 + ], + "type": "interline_equation", + "content": "P (C, R \\mid A, Q; \\theta) = f _ {\\theta} (A, Q). 
\\tag {4}", + "image_path": "8c9b158f3b6abdf8d4ff78ad93efe2469219954ab2846116d784c34533ee7bf7.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 636, + 504, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 636, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 636, + 504, + 658 + ], + "type": "text", + "content": "To ensure explicit learning of both reasoning and final response generation, we construct a dataset defined as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 247, + 659, + 505, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 659, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 247, + 659, + 505, + 673 + ], + "type": "interline_equation", + "content": "\\mathcal {D} = \\left\\{\\left(A _ {i}, Q _ {i}, C _ {i}, R _ {i}\\right) \\right\\} _ {i = 1} ^ {N}, \\tag {5}", + "image_path": "e9d84268695a0b78837aa55b2f3c2c61547d2083f785d6975aa3b5eb5b3672ba.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "where each training sample consists of an input audio signal " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": ", its corresponding textual query " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "Q_{i}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": ", the structured reasoning process " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "C_{i}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": ", and the final answer " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "R_{i}" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": ". This dataset formulation reinforces the model's ability to perform in-context learning and deep reasoning, ensuring that generated responses are not only accurate but also logically structured." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "The training objective maximizes the likelihood of both " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": ", encouraging the model to first reason and then generate a response. 
The loss function is given by:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 225, + 101, + 505, + 133 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 101, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 225, + 101, + 505, + 133 + ], + "type": "interline_equation", + "content": "\\mathcal {L} (\\theta) = - \\sum_ {i = 1} ^ {N} \\log P \\left(C _ {i}, R _ {i} \\mid A _ {i}, Q _ {i}; \\theta\\right). \\tag {6}", + "image_path": "022bab20a6c17092521112a27333b46faee53c564133aacfc0d98298fd6693ce.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 504, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 504, + 177 + ], + "type": "text", + "content": "By optimizing this objective, Audio-Reasoner is trained to articulate a structured reasoning process before providing its final response. This approach enhances interpretability, reliability, and alignment with human reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "text", + "content": "At inference-time, our Audio-Reasoner follows a structured reasoning pipeline, as illustrated in Figure 2. The reasoning process consists of four sequential steps: (1) Planning " + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "inline_equation", + "content": "(P)" + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "text", + "content": ": The model analyzes the query, identifies key problem components, and outlines the reasoning steps necessary to derive an answer. (2) Captioning " + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "inline_equation", + "content": "(C)" + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "text", + "content": ": Relevant multimodal content is extracted from the input, such as speech transcription, acoustic event detection, or context information. (3) Reasoning " + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "inline_equation", + "content": "(R)" + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "text", + "content": ": Based on the extracted content, the model performs structured, step-by-step reasoning. (4) Summary " + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "inline_equation", + "content": "(S)" + }, + { + "bbox": [ + 104, + 181, + 506, + 270 + ], + "type": "text", + "content": ": The model synthesizes its reasoning process into a final, concise, and accurate response. 
This structured inference process can be formalized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 255, + 274, + 505, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 274, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 255, + 274, + 505, + 287 + ], + "type": "interline_equation", + "content": "P \\sim f _ {\\theta} (A, Q), \\tag {7}", + "image_path": "db6b162828ee79a43960ffd645413d25eccca75dbd5f88e30e55b91dc7072094.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 255, + 289, + 505, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 289, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 255, + 289, + 505, + 300 + ], + "type": "interline_equation", + "content": "C \\sim f _ {\\theta} (A, Q, P), \\tag {8}", + "image_path": "a0e3ced6cc0844321518594d653e2404e66ed1bfca82eb0fbadd1e18d34e8bcb.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 256, + 302, + 505, + 315 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 302, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 256, + 302, + 505, + 315 + ], + "type": "interline_equation", + "content": "R \\sim f _ {\\theta} (A, Q, P, C), \\tag {9}", + "image_path": "66aafb1ed24409692cab175d546583d2f46f15ad129f27cd09d25b40ea5fc1b8.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 257, + 316, + 505, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 316, + 505, + 328 + ], + "spans": [ + { + "bbox": [ + 257, + 316, + 505, + 328 + ], + "type": "interline_equation", + "content": "S \\sim f _ {\\theta} (A, Q, P, C, R). \\tag {10}", + "image_path": "db555cca80c118868a6de6422e9ddb832ef78c085a5076d8739dc1f9d357830e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 339, + 507, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 507, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 507, + 396 + ], + "type": "text", + "content": "Compared to the direct-response counterpart [Chu et al., 2024], this approach provides two key advantages: Improved Interpretability—By explicitly modeling each reasoning step, the process becomes more transparent, making it easier to analyze and diagnose errors. Reduced Hallucinations—The structured reasoning pipeline mitigates speculative or incorrect responses, ensuring that outputs remain logically grounded." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 399, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 506, + 464 + ], + "type": "text", + "content": "Figure 2 illustrates the structured CoT reasoning process, highlighting how each stage contributes to the final response. This approach draws inspiration from recent advancements in symbolic reasoning and CoT training [Cui et al., 2024], which emphasize that zero-shot reasoning without training is less effective. Moreover, previous studies have shown that models tuned on native CoT data significantly outperform those trained on generic labels, especially in multimodal reasoning tasks [Guo et al., 2024, Wen et al., 2019]." 
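To make the objective in Eq. (6) concrete, the following is a minimal PyTorch sketch of the masked next-token loss used for CoT supervised fine-tuning: the loss is computed only over the concatenated reasoning chain C and response R, while the audio-plus-query prompt (A, Q) is conditioned on but not scored. The function name and prompt-length bookkeeping are illustrative assumptions, not code from the Audio-Reasoner release.

```python
import torch
import torch.nn.functional as F

def cot_sft_loss(logits, input_ids, prompt_len):
    """Eq. (6) for one sample: -log P(C, R | A, Q) under a causal LM.

    logits:     (T, V) next-token logits over the full sequence [A; Q; C; R]
    input_ids:  (T,)   token ids of the same sequence
    prompt_len: number of leading tokens covering (A, Q); these are
                conditioned on but excluded from the loss
    """
    shift_logits = logits[:-1]              # position t predicts token t+1
    shift_labels = input_ids[1:].clone()
    shift_labels[: prompt_len - 1] = -100   # mask the (A, Q) prompt tokens
    return F.cross_entropy(shift_logits, shift_labels, ignore_index=-100)

# Toy check: a 12-token sequence whose first 5 tokens are the (A, Q) prompt.
T, V = 12, 32
print(cot_sft_loss(torch.randn(T, V), torch.randint(0, V, (T,)), prompt_len=5))
```

At inference time, the same objective yields the staged behavior of Eqs. (7)-(10): decoding is autoregressive, so the planning, captioning, and reasoning tokens are generated first and the summary is conditioned on all of them.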
+ } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 187, + 488, + 421, + 579 + ], + "blocks": [ + { + "bbox": [ + 181, + 473, + 428, + 484 + ], + "lines": [ + { + "bbox": [ + 181, + 473, + 428, + 484 + ], + "spans": [ + { + "bbox": [ + 181, + 473, + 428, + 484 + ], + "type": "text", + "content": "Table 1: Domains and tasks of our constructed CoTA dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 488, + 421, + 579 + ], + "lines": [ + { + "bbox": [ + 187, + 488, + 421, + 579 + ], + "spans": [ + { + "bbox": [ + 187, + 488, + 421, + 579 + ], + "type": "table", + "html": "
<table><tr><td>Domain</td><td>Task</td><td>Description</td></tr>
<tr><td>Sound</td><td>Sound QA</td><td>Sound question answering</td></tr>
<tr><td rowspan="3">Speech</td><td>Speech QA</td><td>Speech question answering</td></tr>
<tr><td>SER</td><td>Speaker emotion recognition</td></tr>
<tr><td>S2TT</td><td>Speech to text translation</td></tr>
<tr><td>Music</td><td>Music QA</td><td>Music question answering</td></tr></table>
", + "image_path": "88d6d1d91a5fcae4c317dc397974c8a5e17fb49b38e93a72633130f64611aca7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 592, + 345, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 345, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 345, + 605 + ], + "type": "text", + "content": "3.2 Systematic Data Preparation for Audio Reasoning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 607, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 506, + 673 + ], + "type": "text", + "content": "Training the Audio-Reasoner model requires a high-quality, diverse, and multitask audio-based reasoning dataset. Our goal is to develop a scalable and effective data generation method that systematically transforms raw audio data and simple human-labeled annotations into structured reasoning tasks. The resulting CoTA dataset with 1.2 million samples, focusing on complex reasoning-based question-answering tasks, spans three domains—audio, speech, and music—as detailed in Table 1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "To achieve this, we design a structured data generation pipeline consisting of three key stages: (1) generating high-quality annotations and diverse questions, (2) constructing structured reasoning chains, and (3) performing comprehensive validation. The complete pipeline is illustrated in Figure 3. The following sections describe each stage in detail." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 71, + 504, + 277 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 504, + 277 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 504, + 277 + ], + "type": "image", + "image_path": "cd2008c2e248f8239147184f97d37d4637b0f8e288fd39a3f6af8d50411851e3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 286, + 398, + 300 + ], + "lines": [ + { + "bbox": [ + 211, + 286, + 398, + 300 + ], + "spans": [ + { + "bbox": [ + 211, + 286, + 398, + 300 + ], + "type": "text", + "content": "Figure 3: Multistage data generation pipeline." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 319, + 294, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 294, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 294, + 331 + ], + "type": "text", + "content": "3.2.1 Multistage Data Generation Pipeline" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 332, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 332, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 506, + 443 + ], + "type": "text", + "content": "Stage 1: Automated Annotation and Question-Answer Synthesis. We begin by using advanced external closed-source models to improve the existing simple human annotations into high-quality and logically coherent training data. While large language models can produce hallucinations in free-form generation, they are highly effective at structured, evidence-based content creation. We leverage this strength by instructing the model to sequentially describe the audio elements, enhancing its understanding of sound sources and speech context. Based on these enriched descriptions, the model generates a diverse set of questions, ranging from straightforward factual queries to complex logical reasoning problems. This process ensures that the dataset captures a broad spectrum of reasoning tasks, as summarized in Table 1. Besides, in Sec. A.1 we provide the prompt in deducing closed-source models to generate questions and corresponding answers." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 448, + 506, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 506, + 536 + ], + "type": "text", + "content": "Stage 2: Structured Reasoning Chain Construction. Next, we transform the generated question-answer pairs into structured reasoning chains. Given the limited development of CoT methodologies in the audio domain, we adopt a systematic approach to ensure inference stability. The model first plans and analyzes the questions, extracts key information from the captions, and formulates logical steps leading to the answer. To facilitate structured reasoning, we employ explicit step demarcations such as and , allowing the model to autonomously construct multi-step inference pathways. Sec. A.2 describes the prompt used for the structured reasoning chain construction process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 541, + 507, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 507, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 507, + 597 + ], + "type": "text", + "content": "Stage 3: Quality Assurance and Dataset Validation. Finally, we subject the generated data to a rigorous review process. Using the raw audio input, Stage 1 annotations, and Stage 2 reasoning chains, the model assesses whether the generated content is accurate, coherent, and suitable for inclusion in the final dataset. This step ensures the overall quality and reliability of the CoTA dataset. Sec. A.3 illustrates prompt used for filtering low-quality contents." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 603, + 388, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 603, + 388, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 388, + 615 + ], + "type": "text", + "content": "3.2.2 Task Taxonomy: Categories and Reasoning Methodologies" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 640 + ], + "type": "text", + "content": "The CoTA dataset encompasses a range of reasoning-based tasks, each requiring distinct reasoning path that the model should grasp. These include:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "(1) Sound-Based Question Answering: The model identifies and analyzes sound characteristics, contextualizing them within the user's query to derive a reasoned response. (2) Speech-Based Question Answering: The model recognizes speaker timbres, transcribes speech content, and incrementally processes the question to determine the appropriate answer. (3) Speech Emotion Recognition (SER) and Speech-to-Text Translation (S2TT): These specialized tasks require the model to integrate speech recognition with emotion analysis and language translation, forming a structured reasoning process. (4) Music-Based Question Answering: As music is highly abstract, the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 109, + 504, + 251 + ], + "blocks": [ + { + "bbox": [ + 104, + 69, + 504, + 102 + ], + "lines": [ + { + "bbox": [ + 104, + 69, + 504, + 102 + ], + "spans": [ + { + "bbox": [ + 104, + 69, + 504, + 102 + ], + "type": "text", + "content": "Table 2: Composition of our CoTA Dataset. We consider Google Gemini [Team et al., 2024] to build the reasoning ability in CoTA. Note that Multi-Speaker and Complex Audio datasets are manually synthesized, details of which can be referred to Sec. B." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 109, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 107, + 109, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 107, + 109, + 504, + 251 + ], + "type": "table", + "html": "
<table><tr><td>Category</td><td>Dataset Source</td><td>Main Skills Learning</td><td>Model Used</td><td>Quantity</td><td>Percentage</td><td>Synthetic</td></tr>
<tr><td rowspan="4">Speech</td><td>Multi-Speaker</td><td>Multi-speaker Speech QA</td><td>gemini-2.0-flash</td><td>117.4k</td><td>12.09%</td><td>Yes</td></tr>
<tr><td>MELD [Poria et al., 2019]</td><td>Speech Emotion QA</td><td>gemini-2.0-pro-exp</td><td>29.2k</td><td>3.01%</td><td>No</td></tr>
<tr><td>CoVoST2 [Wang et al., 2021]</td><td>Speech-to-Text Translation</td><td>gemini-2.0-flash</td><td>224.6k</td><td>23.13%</td><td>No</td></tr>
<tr><td>Mustard [Castro et al., 2019]</td><td>Sarcasm Detection</td><td>gemini-2.0-pro-exp</td><td>1k</td><td>0.10%</td><td>No</td></tr>
<tr><td>Music</td><td>MusicBench [Melechovsky et al., 2024]</td><td>Music QA</td><td>gemini-2.0-flash</td><td>137.1k</td><td>14.12%</td><td>No</td></tr>
<tr><td rowspan="4">Sound</td><td>AudioSet [Gemmeke et al., 2017]</td><td>Sound QA</td><td>gemini-2.0-flash</td><td>315.2k</td><td>32.46%</td><td>No</td></tr>
<tr><td>Clotho [Drossos et al., 2020]</td><td>Sound QA</td><td>gemini-2.0-pro-exp</td><td>9.3k</td><td>0.93%</td><td>No</td></tr>
<tr><td>AudioCaps [Kim et al., 2019]</td><td>Sound QA</td><td>gemini-2.0-flash</td><td>117.5k</td><td>12.10%</td><td>No</td></tr>
<tr><td>Complex Audio</td><td>Complex Audio QA</td><td>gemini-2.0-flash</td><td>20k</td><td>2.06%</td><td>Yes</td></tr></table>
", + "image_path": "3ec1e7650c3c779d4d0d24f9cb1ea3e3f0794e2a588ef85c7281f11c5d6a2d1f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 271, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 271, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 504, + 307 + ], + "type": "text", + "content": "model first analyzes fundamental attributes such as tonality, tempo, and emotion before progressing to genre classification and deeper inferential reasoning based on the user's query. A comprehensive breakdown of the dataset's task categories and reasoning pathways is provided in Table 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 312, + 253, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 253, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 253, + 324 + ], + "type": "text", + "content": "3.2.3 Conclusion and Next Steps" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 327, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 392 + ], + "type": "text", + "content": "In summary, we have introduced a systematic data generation pipeline that ensures the creation of high-quality, structured reasoning data for the Audio-Reasoner model. Our approach involves enriching raw audio data with detailed annotations, generating diverse questions, constructing explicit reasoning chains, and implementing a comprehensive validation framework. The following section provides an in-depth analysis of the final CoTA dataset and its reasoning capabilities, with a detailed statistical overview presented in Table 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 402, + 230, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 230, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 230, + 414 + ], + "type": "text", + "content": "3.3 CoTA Dataset Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "content": "To evaluate the quality and reasoning efficacy of the CoTA dataset, we analyze its design from two key perspectives: (1) comprehensive audio coverage, ensuring broad representation across real-world and synthetic scenarios, and (2) scalability of reasoning complexity, which aligns task difficulty with structured inference patterns. Together, these aspects address critical gaps in audio-language pretraining." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 478, + 506, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 545 + ], + "type": "text", + "content": "Comprehensive Audio Coverage. CoTA integrates three audio domains—speech (38.33%), music (14.12%), and environmental sounds (47.55%)—ensuring diverse and representative coverage of real-world auditory contexts. 
This multi-domain structure captures a wide spectrum of acoustic phenomena, ranging from conversational speech (e.g., speech-to-text translation tasks in CoVoST 2) to intricate musical structures (MusicBench) and fine-grained environmental sound analysis (e.g., AudioSet's rich descriptions of acoustic environments)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": "A distinctive feature of CoTA is its hybrid synthetic-real composition, where synthetic data (Multi-Speaker and Complex Audio, " + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "inline_equation", + "content": "14.15\\%" + }, + { + "bbox": [ + 104, + 550, + 506, + 628 + ], + "type": "text", + "content": " of total samples) is strategically incorporated to enhance complex reasoning tasks, such as multi-step logical inference in Complex Audio. Meanwhile, the majority of the dataset is derived from high-quality real-world sources (e.g., MELD for emotion recognition). By unifying tasks across 10 distinct categories, spanning from fundamental classification to advanced tasks like translation and irony detection, CoTA facilitates a hierarchical learning process—an aspect largely absent in traditional datasets constrained to simple labeling tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 507, + 723 + ], + "type": "text", + "content": "Scalability of Reasoning Complexity. The word count distribution in the CoTA dataset highlights the model's capacity for long-chain reasoning. As shown in Figure 4, most responses fall between 300 and 500 words, allowing for nuanced reasoning and detailed logical steps, particularly in audio and music question answering. This extended length supports transparency in reasoning, ensuring a thorough exploration of complex ideas. For more demanding tasks, such as those in the Multi-Speaker dataset, responses can reach up to 1,500 words. This increase reflects the model's ability to systematically break down intricate problems, demonstrating adaptive reasoning in scenarios requiring a deep understanding of multiple interacting elements." 
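As a worked illustration of this analysis, the snippet below computes the Figure 4 statistics for one sub-dataset: word counts bucketed into 100-word bins over the 0-1200 range, normalized to proportions, with a simple moving average as the trend line. The `samples` list is an assumed stand-in for the CoT strings of a CoTA subset, not a loader shipped with the dataset.

```python
from collections import Counter

def length_distribution(samples, bin_width=100, max_len=1200):
    """Proportion of samples per word-count bin, plus a 3-bin moving average."""
    n_bins = max_len // bin_width + 1  # final bin collects >= max_len words
    counts = Counter(min(len(s.split()) // bin_width, n_bins - 1) for s in samples)
    props = [counts.get(i, 0) / len(samples) for i in range(n_bins)]
    # Moving average, mirroring the trend line overlaid in Figure 4.
    smooth = [sum(props[max(0, i - 1): i + 2]) / len(props[max(0, i - 1): i + 2])
              for i in range(n_bins)]
    return props, smooth

# Toy run: four short answers and six ~450-word answers.
props, smooth = length_distribution(["one two three"] * 4 + ["word " * 450] * 6)
print([round(p, 2) for p in props])
```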
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 72, + 240, + 135 + ], + "blocks": [ + { + "bbox": [ + 111, + 72, + 240, + 135 + ], + "lines": [ + { + "bbox": [ + 111, + 72, + 240, + 135 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 240, + 135 + ], + "type": "image", + "image_path": "c632a41b5a7f412a8c682f3bc6d71109768d4b1c43941df87876b341f545f899.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 242, + 72, + 367, + 135 + ], + "blocks": [ + { + "bbox": [ + 242, + 72, + 367, + 135 + ], + "lines": [ + { + "bbox": [ + 242, + 72, + 367, + 135 + ], + "spans": [ + { + "bbox": [ + 242, + 72, + 367, + 135 + ], + "type": "image", + "image_path": "0bd2aa1ff7c3048ba77b507121a9d0abf1df8f3b78910baab538684bec5b6143.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 369, + 72, + 498, + 135 + ], + "blocks": [ + { + "bbox": [ + 369, + 72, + 498, + 135 + ], + "lines": [ + { + "bbox": [ + 369, + 72, + 498, + 135 + ], + "spans": [ + { + "bbox": [ + 369, + 72, + 498, + 135 + ], + "type": "image", + "image_path": "0b63d5816c7d0317a054224a41f1c67e98fb31529cf58d18f1df86ff86ef0e74.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 111, + 135, + 240, + 195 + ], + "blocks": [ + { + "bbox": [ + 111, + 135, + 240, + 195 + ], + "lines": [ + { + "bbox": [ + 111, + 135, + 240, + 195 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 240, + 195 + ], + "type": "image", + "image_path": "fc8096f2ec175706b289de4b055035b882781920208ae3008150038fb4a696d5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 242, + 135, + 368, + 195 + ], + "blocks": [ + { + "bbox": [ + 242, + 135, + 368, + 195 + ], + "lines": [ + { + "bbox": [ + 242, + 135, + 368, + 195 + ], + "spans": [ + { + "bbox": [ + 242, + 135, + 368, + 195 + ], + "type": "image", + "image_path": "2de0a197ee7589cf5a822ea1b183f833691d28cda1c03b177d528955bc25806c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 369, + 135, + 498, + 195 + ], + "blocks": [ + { + "bbox": [ + 369, + 135, + 498, + 195 + ], + "lines": [ + { + "bbox": [ + 369, + 135, + 498, + 195 + ], + "spans": [ + { + "bbox": [ + 369, + 135, + 498, + 195 + ], + "type": "image", + "image_path": "dd3a7cdcf4bf1022e1ef3b362fc3dc39661fa746dfbd2244684f3c5d5d0bd44f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 111, + 195, + 240, + 258 + ], + "blocks": [ + { + "bbox": [ + 111, + 195, + 240, + 258 + ], + "lines": [ + { + "bbox": [ + 111, + 195, + 240, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 195, + 240, + 258 + ], + "type": "image", + "image_path": "d6eae35fa287899c4c41075663ba130ae8f203ee0f1b708b9020787659e4450b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + 
}, + { + "bbox": [ + 104, + 266, + 506, + 300 + ], + "lines": [ + { + "bbox": [ + 104, + 266, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 506, + 300 + ], + "type": "text", + "content": "Figure 4: The bar chart shows the data length distribution across nine CoTA sub-datasets, with intervals of 100 on the horizontal axis (0—1200) and proportions on the vertical axis (totaling " + }, + { + "bbox": [ + 104, + 266, + 506, + 300 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 266, + 506, + 300 + ], + "type": "text", + "content": "). A moving average trend line is overlaid." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 242, + 195, + 368, + 258 + ], + "blocks": [ + { + "bbox": [ + 242, + 195, + 368, + 258 + ], + "lines": [ + { + "bbox": [ + 242, + 195, + 368, + 258 + ], + "spans": [ + { + "bbox": [ + 242, + 195, + 368, + 258 + ], + "type": "image", + "image_path": "9116a5ec7b70fbbbb30739b8027d02e6fc477dfb33fb817358a70343cda2369e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 369, + 195, + 498, + 258 + ], + "blocks": [ + { + "bbox": [ + 369, + 195, + 498, + 258 + ], + "lines": [ + { + "bbox": [ + 369, + 195, + 498, + 258 + ], + "spans": [ + { + "bbox": [ + 369, + 195, + 498, + 258 + ], + "type": "image", + "image_path": "44be2c35e1b0d98bd8e74345eeddadcfbb53fdc71670efecae6cebf702ece18c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 322, + 504, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 504, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 504, + 389 + ], + "type": "text", + "content": "Conversely, simpler tasks like S2TT elicit more concise responses, typically between 100 and 200 words. This brevity prevents unnecessary elaboration, maintaining clarity and efficiency when a straightforward answer suffices. By adjusting response length based on task complexity, the model showcases its dynamic reasoning capabilities—balancing depth and precision to effectively address a wide range of analytical challenges. Sec. C introduces more evidence about CoTA's scalability of reasoning complexity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 400, + 192, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 192, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 192, + 414 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 419, + 216, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 216, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 216, + 431 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 434, + 504, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 489 + ], + "type": "text", + "content": "Training Details. Our model, Audio-Reasoner, is built on Qwen2-Audio-Instruct [Chu et al., 2024], comprising a total of 8.4 billion parameters. The training process was conducted using the ms-swift framework [Zhao et al., 2024b], employing supervised fine-tuning with full-parameter tuning. 
We used a maximum learning rate of 1e-5 and trained the model for a single epoch across the entire CoTA dataset." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 496, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 506, + 628 + ], + "type": "text", + "content": "Evaluation Metric. Existing evaluation datasets in the audio domain primarily focus on simple classification tasks or BLEU [Papineni et al., 2002] score-based assessments of model responses. These approaches, however, fail to comprehensively reflect the reasoning capabilities of LALMs. To address this, our evaluation methodology incorporates multiple diverse benchmarks. First, we assessed the model's accuracy on closed-form questions using the MMAU-mini subset [Sakshi et al., 2024]. This subset was specifically chosen since the model's training data does not include multiple-choice questions. Additionally, we evaluated its real-world conversational abilities using the chat and foundation sections of AIR-Bench [Yang et al., 2024b]. These benchmarks encompass various audio modalities, including sound, speech, and music. Beyond these, we also tested the model's performance on traditional audio-related tasks such as speech-to-text translation (S2TT) on CoVoST 2 [Wang et al., 2021] and speech emotion recognition (SER) on the MELD dataset [Poria et al., 2019]. A comprehensive summary of the evaluation tasks and datasets is presented in Table 3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "Baselines. We primarily select state-of-the-art large audio language models as the baselines for comparison. These include the closed-source models Gemini-1.5-pro [Team et al., 2024], GPT-4o [Hurst et al., 2024], and Qwen-Audio-Turbo [Chu et al., 2023], as well as the open-source models SALMONN [Tang et al., 2023], Qwen-Audio-Chat [Chu et al., 2023], and Qwen2-Audio-Instruct [Chu et al., 2024], which also serves as our base model. Additionally, we compare cascade approaches such as Whisper [Radford et al., 2023] + GPT-4 [Achiam et al., 2023] and a series of mainstream multimodal large language models [Gong et al., 2023a,b, Kong et al., 2024, Ghosh et al., 2024, Liu et al., 2024b, Su et al., 2023, Wu et al., 2024, Wang et al., 2023, Zhang et al., 2023a]." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 87, + 504, + 326 + ], + "blocks": [ + { + "bbox": [ + 171, + 69, + 438, + 81 + ], + "lines": [ + { + "bbox": [ + 171, + 69, + 438, + 81 + ], + "spans": [ + { + "bbox": [ + 171, + 69, + 438, + 81 + ], + "type": "text", + "content": "Table 3: Summary of evaluation benchmarks for Audio-Reasoner."
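For concreteness, the sketch below is a minimal, hypothetical rendering of the training recipe just described: full-parameter SFT of Qwen2-Audio-Instruct at a peak learning rate of 1e-5 for one epoch over CoTA. The authors ran this with the ms-swift framework, whose exact invocation is not reproduced here; the Hugging Face API shown, the dataset wiring, and the scheduler/precision/batch settings are illustrative assumptions, not the paper's configuration.

```python
# Illustrative sketch only: maps the two hyperparameters stated in Sec. 4.1
# (peak LR 1e-5, one epoch over CoTA) onto a plain Hugging Face setup.
# The paper used the ms-swift framework; its exact flags are not given here.
from transformers import (
    AutoProcessor,
    Qwen2AudioForConditionalGeneration,
    TrainingArguments,
)

MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"  # 8.4B-parameter base model
model = Qwen2AudioForConditionalGeneration.from_pretrained(MODEL_ID)
processor = AutoProcessor.from_pretrained(MODEL_ID)

args = TrainingArguments(
    output_dir="audio-reasoner-sft",
    learning_rate=1e-5,              # maximum LR reported in Sec. 4.1
    num_train_epochs=1,              # single pass over CoTA, as reported
    lr_scheduler_type="cosine",      # assumption: schedule is not reported
    bf16=True,                       # assumption: precision is not reported
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,  # assumption: batch size is not reported
)
# Full-parameter tuning means no adapters/LoRA are attached: every weight in
# `model` stays trainable. Building the CoTA dataset and an audio-text collator
# (audio -> processor feature extractor, structured CoT target -> labels) is
# omitted because those details are not specified in the paper.
```

Full-parameter tuning (rather than LoRA-style adapters) is the costlier choice, but it matches the paper's stated setup for an 8.4B model trained once over 1.2M reasoning-rich samples.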
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 87, + 504, + 326 + ], + "lines": [ + { + "bbox": [ + 108, + 87, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 108, + 87, + 504, + 326 + ], + "type": "table", + "html": "
<table>
<tr><th>Dataset</th><th>Split</th><th>Task</th><th>Metric</th></tr>
<tr><td rowspan="3">MMAU-mini</td><td>Sound</td><td>Sound QA</td><td>ACC</td></tr>
<tr><td>Speech</td><td>Speech QA</td><td>ACC</td></tr>
<tr><td>Music</td><td>Music QA</td><td>ACC</td></tr>
<tr><td rowspan="9">AIR-Bench</td><td>Chat-Sound</td><td>Sound QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Chat-Speech</td><td>Speech QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Chat-Music</td><td>Music QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Chat-MixedAudio</td><td>Complex Audio QA</td><td>GPT-4 Eval</td></tr>
<tr><td>Foundation-SoundAQA</td><td>Sound QA</td><td>ACC</td></tr>
<tr><td>Foundation-SER</td><td>Speech QA</td><td>ACC</td></tr>
<tr><td>Foundation-SIC</td><td>Speaker Intent Classification</td><td>ACC</td></tr>
<tr><td>Foundation-SNV</td><td>Speaker Number Verification</td><td>ACC</td></tr>
<tr><td>Foundation-MusicAQA</td><td>Music QA</td><td>ACC</td></tr>
<tr><td>CoVoST 2</td><td>Test</td><td>Speech-to-Text Translation (S2TT)</td><td>BLEU</td></tr>
<tr><td>MELD</td><td>Test</td><td>Speech Emotion Recognition (SER)</td><td>ACC</td></tr>
</table>
", + "image_path": "c3e3427d62e128e51a15ee3ca40b87c9a1ad286d1b71c5c6a39e8ff92cd8525c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 346, + 188, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 188, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 188, + 357 + ], + "type": "text", + "content": "4.2 Main Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 361, + 506, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 418 + ], + "type": "text", + "content": "To evaluate the effectiveness of Audio-Reasoner, we compare its performance against both closed-source and open-source baselines on benchmark datasets of MMAU-mini and AIR-Bench chat/foundation, CoVoST 2 (zn/en subset) and MELD. The results in Tables 4, 5, 6, 7 and 8 clearly demonstrate that Audio-Reasoner significantly outperforms existing models, setting a new state-of-the-art in audio reasoning tasks." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 113, + 456, + 496, + 654 + ], + "blocks": [ + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "type": "text", + "content": "Table 4: Performance comparison on MMAU-mini. The " + }, + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{so},\\mathbf{mu},\\mathbf{sp}\\}" + }, + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "type": "text", + "content": " indicates whether \"sound\", \"music\", and \"speech\" have been used in training." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 113, + 456, + 496, + 654 + ], + "lines": [ + { + "bbox": [ + 113, + 456, + 496, + 654 + ], + "spans": [ + { + "bbox": [ + 113, + 456, + 496, + 654 + ], + "type": "table", + "html": "
<table>
<tr><th>Model</th><th>Size</th><th>{so, mu, sp}</th><th>Sound</th><th>Music</th><th>Speech</th><th>Avg</th></tr>
<tr><td colspan="7">Closed-Source</td></tr>
<tr><td>gpt4o + caption</td><td>-</td><td>-</td><td>63.36</td><td>60.77</td><td>53.15</td><td>57.30</td></tr>
<tr><td>gemini-1.5-pro</td><td>-</td><td>-</td><td>56.75</td><td>49.40</td><td>58.55</td><td>54.90</td></tr>
<tr><td colspan="7">Open-Source</td></tr>
<tr><td>LTU</td><td>7B</td><td>Y Y N</td><td>22.52</td><td>9.69</td><td>17.71</td><td>16.89</td></tr>
<tr><td>LTU-AS</td><td>7B</td><td>Y Y Y</td><td>23.35</td><td>9.10</td><td>20.60</td><td>17.68</td></tr>
<tr><td>Audio Flamingo-Chat</td><td>2.2B</td><td>Y Y N</td><td>23.42</td><td>15.26</td><td>11.41</td><td>16.69</td></tr>
<tr><td>GAMA</td><td>7B</td><td>Y Y N</td><td>41.44</td><td>32.33</td><td>18.91</td><td>30.90</td></tr>
<tr><td>GAMA-IT</td><td>7B</td><td>Y Y N</td><td>43.24</td><td>28.44</td><td>18.91</td><td>30.20</td></tr>
<tr><td>MU-LLaMA</td><td>7B</td><td>N Y N</td><td>40.84</td><td>32.63</td><td>22.22</td><td>31.90</td></tr>
<tr><td>SALMONN</td><td>13B</td><td>Y Y Y</td><td>41.00</td><td>34.80</td><td>25.50</td><td>33.70</td></tr>
<tr><td>Qwen-Audio-Chat</td><td>8.4B</td><td>Y Y Y</td><td>55.25</td><td>44.00</td><td>30.03</td><td>43.10</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>8.4B</td><td>Y Y Y</td><td>54.95</td><td>50.98</td><td>42.04</td><td>49.20</td></tr>
<tr><td colspan="7">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>8.4B</td><td>Y Y Y</td><td>60.06</td><td>64.30</td><td>60.70</td><td>61.71</td></tr>
</table>
", + "image_path": "4cdc42b2122ada9d534b56954388b22fa1a2e3cfc283d42019d98a9267293df7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 666, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 507, + 723 + ], + "type": "text", + "content": "Performance on MMAU-mini. MMAU-mini in Table 4 assesses multimodal audio understanding across three major domains: sound, music, and speech. We first make a comparison with closed-source models. Audio-Reasoner achieves the highest overall score (61.71%) outperforming GPT-4o (57.30%) and Gemini-1.5-Pro (54.90%). Music reasoning shows the most significant improvement—Audio-Reasoner: 64.30%, GPT-4o 60.77% and Gemini-1.5-Pro: 49.40%. This indicates superior" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 86, + 474, + 288 + ], + "blocks": [ + { + "bbox": [ + 171, + 69, + 438, + 81 + ], + "lines": [ + { + "bbox": [ + 171, + 69, + 438, + 81 + ], + "spans": [ + { + "bbox": [ + 171, + 69, + 438, + 81 + ], + "type": "text", + "content": "Table 5: Performance comparison on AIR-Bench chat benchmark." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 86, + 474, + 288 + ], + "lines": [ + { + "bbox": [ + 135, + 86, + 474, + 288 + ], + "spans": [ + { + "bbox": [ + 135, + 86, + 474, + 288 + ], + "type": "table", + "html": "
<table>
<tr><th>Model</th><th>Sound</th><th>Music</th><th>Speech</th><th>Mixed Audio</th><th>Average</th></tr>
<tr><td colspan="6">Closed-Source</td></tr>
<tr><td>Whisper+GPT4</td><td>-</td><td>-</td><td>7.54</td><td>-</td><td>7.54</td></tr>
<tr><td>Qwen-Audio-Turbo</td><td>6.59</td><td>5.98</td><td>7.04</td><td>5.77</td><td>6.34</td></tr>
<tr><td>Gemini-1.5-pro</td><td>5.49</td><td>5.06</td><td>6.97</td><td>5.27</td><td>5.70</td></tr>
<tr><td colspan="6">Open-Source</td></tr>
<tr><td>Macaw-LLM</td><td>1.01</td><td>0.91</td><td>0.97</td><td>1.00</td><td>1.01</td></tr>
<tr><td>SpeechGPT</td><td>0.95</td><td>0.95</td><td>1.57</td><td>1.14</td><td>1.15</td></tr>
<tr><td>Next-gpt</td><td>4.76</td><td>4.18</td><td>3.86</td><td>2.92</td><td>4.13</td></tr>
<tr><td>Pandagpt</td><td>5.46</td><td>5.06</td><td>3.58</td><td>2.93</td><td>4.25</td></tr>
<tr><td>BLSP</td><td>5.55</td><td>5.08</td><td>6.17</td><td>4.52</td><td>5.33</td></tr>
<tr><td>Qwen-Audio</td><td>6.95</td><td>5.52</td><td>6.47</td><td>5.38</td><td>6.08</td></tr>
<tr><td>SALMONN</td><td>6.28</td><td>5.95</td><td>6.16</td><td>6.08</td><td>6.11</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>6.99</td><td>6.79</td><td>7.18</td><td>6.77</td><td>6.93</td></tr>
<tr><td colspan="6">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>7.68</td><td>8.05</td><td>8.19</td><td>6.65</td><td>7.94</td></tr>
</table>
", + "image_path": "495d72673fc5923d3356f92e45a94f3c487f2f71c1b2b4eb8e0ea4f432c28a19.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": "musical structure comprehension, enabled by CoTA's diverse music-based tasks. Speech-based reasoning is also notably strong—Audio-Reasoner: " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "60.70\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": ", GPT-4o: " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "53.15\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": " and Gemini-1.5-Pro: " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "58.55\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": ". This validates CoTA's impact in training models for context-dependent spoken language understanding. In comparison with open-source models, Audio-Reasoner surpasses all open-source models, with the next-best, Qwen2-Audio-Instruct, trailing by 12.51 percentage points " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "(49.20\\%)" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": ". Across individual domains, Audio-Reasoner achieves " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "60.06\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": " in sound reasoning (beating Qwen2-Audio's " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "54.95\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": "), " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "64.30\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": " in music (outperforming Qwen2-Audio's " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "50.98\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": "), and " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "60.70\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": " in speech (exceeding Qwen2-Audio's " + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "inline_equation", + "content": "42.04\\%" + }, + { + "bbox": [ + 104, + 307, + 506, + 396 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 400, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 506, + 510 + ], + "type": "text", + "content": "Performance on AIR-Bench chat. (1) chat benchmark. AIR-Bench chat in Table 5 evaluates contextual and conversational reasoning across four audio types: sound, music, speech, and mixed audio. 
Regarding closed-source models, Audio-Reasoner achieves the highest overall score (7.94), outperforming Gemini-1.5-Pro (5.70) and Whisper+GPT-4 (7.54). It shows the most significant improvements in music (8.05) and speech (8.19). Additionally, its mixed audio reasoning score (6.65) demonstrates proficiency in handling multi-source audio tasks. Compared with open-source models, Audio-Reasoner sets a new benchmark, surpassing Qwen2-Audio (6.93) by 1.01 points. Across domains, it achieves 7.68 in sound (beating Qwen2-Audio's 6.99), 8.05 in music (exceeding Qwen2-Audio's 6.79), and 8.19 in speech (outperforming Qwen2-Audio's 7.18), showcasing balanced expertise." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 111, + 536, + 496, + 721 + ], + "blocks": [ + { + "bbox": [ + 157, + 520, + 452, + 533 + ], + "lines": [ + { + "bbox": [ + 157, + 520, + 452, + 533 + ], + "spans": [ + { + "bbox": [ + 157, + 520, + 452, + 533 + ], + "type": "text", + "content": "Table 6: Performance comparison on AIR-Bench foundation benchmark." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 536, + 496, + 721 + ], + "lines": [ + { + "bbox": [ + 111, + 536, + 496, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 536, + 496, + 721 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Model</th><th>AIR-Sound</th><th>AIR-Music</th><th colspan="3">AIR-Speech</th><th rowspan="2">Average</th></tr>
<tr><th>SoundAQA</th><th>MusicAQA</th><th>SER</th><th>SIC</th><th>SNV</th></tr>
<tr><td colspan="7">Closed-Source</td></tr>
<tr><td>Whisper+GPT4</td><td>-</td><td>-</td><td>59.5</td><td>87.7</td><td>30.0</td><td>59.1</td></tr>
<tr><td>Qwen-Audio-Turbo</td><td>62.8</td><td>62.5</td><td>60.0</td><td>56.4</td><td>54.3</td><td>59.2</td></tr>
<tr><td colspan="7">Open-Source</td></tr>
<tr><td>NEXT-GPT</td><td>18.8</td><td>47.1</td><td>25.7</td><td>25.6</td><td>25.4</td><td>28.5</td></tr>
<tr><td>SpeechGPT</td><td>33.9</td><td>31.3</td><td>37.6</td><td>45.8</td><td>32.6</td><td>36.2</td></tr>
<tr><td>BLSP</td><td>36.1</td><td>31.0</td><td>27.4</td><td>46.6</td><td>28.1</td><td>33.8</td></tr>
<tr><td>PandaGPT</td><td>48.7</td><td>50.7</td><td>26.0</td><td>28.5</td><td>43.2</td><td>39.4</td></tr>
<tr><td>SALMONN</td><td>28.4</td><td>54.6</td><td>29.9</td><td>36.7</td><td>34.3</td><td>36.8</td></tr>
<tr><td>Qwen-Audio-Chat</td><td>64.6</td><td>48.2</td><td>43.2</td><td>77.8</td><td>35.3</td><td>53.8</td></tr>
<tr><td colspan="7">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>65.7</td><td>55.2</td><td>60.5</td><td>88.1</td><td>56.3</td><td>65.2</td></tr>
</table>
", + "image_path": "0b50cd8936feda0adcc0ab791100fca33a52a65b976782522081f048a2a42f41.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 237 + ], + "type": "text", + "content": "(2) foundation benchmark. AIR-Bench foundation in Table 6 evaluates fundamental audio understanding across three primary categories: sound, music, and speech, with speech further divided into three subdomains: Speech Emotion Recognition (SER), Speaker Identification and Classification (SIC), and Speech Number Variation (SNV). Audio-Reasoner achieves the highest overall score (65.2), outperforming both closed-source and open-source baselines. Compared to the strongest closed-source model, Qwen-Audio-Turbo (59.2), Audio-Reasoner leads by 6.0 points, demonstrating superior reasoning across all audio domains. With the sound category, Audio-Reasoner attains 65.7, surpassing Qwen-Audio-Chat (64.6) and Qwen-Audio-Turbo (62.8), highlighting its strong ability in environmental and non-speech audio understanding. For music reasoning, Audio-Reasoner achieves 55.2, significantly outperforming Qwen-Audio-Turbo (48.2) and all open-source baselines, indicating better comprehension of musical structures and attributes. Regarding speech reasoning, Audio-Reasoner sets new state-of-the-art results across all subdomains. It attains 60.5 in SER (v.s. Qwen-Audio-Turbo's 60.0), 88.1 in SIC (surpassing Whisper+GPT-4's 87.7), and 56.3 in SNV (exceeding Qwen-Audio-Turbo's 54.3). The substantial lead in SIC showcases its exceptional speaker recognition capability, benefiting from CoTA's step-by-step reasoning process." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 107, + 274, + 502, + 374 + ], + "blocks": [ + { + "bbox": [ + 104, + 246, + 506, + 269 + ], + "lines": [ + { + "bbox": [ + 104, + 246, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 506, + 269 + ], + "type": "text", + "content": "Table 7: Performance comparison of the speech-to-text translation (S2TT) task on CoVoST 2 dataset. We consider the mutual conversion between Chinese and English as training and evaluation data." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 274, + 502, + 374 + ], + "lines": [ + { + "bbox": [ + 107, + 274, + 502, + 374 + ], + "spans": [ + { + "bbox": [ + 107, + 274, + 502, + 374 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Model</th><th colspan="5">EN-ZN</th><th colspan="5">ZN-EN</th><th rowspan="2">Avg</th></tr>
<tr><th>BLEU1</th><th>BLEU2</th><th>BLEU3</th><th>BLEU4</th><th>Avg</th><th>BLEU1</th><th>BLEU2</th><th>BLEU3</th><th>BLEU4</th><th>Avg</th></tr>
<tr><td colspan="12">Closed-Source</td></tr>
<tr><td>Gemini-1.5-pro</td><td>68.25</td><td>49.12</td><td>37.81</td><td>29.79</td><td>46.24</td><td>51.83</td><td>26.61</td><td>16.27</td><td>10.88</td><td>26.39</td><td>36.32</td></tr>
<tr><td colspan="12">Open-Source</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>58.63</td><td>39.55</td><td>28.71</td><td>21.40</td><td>37.07</td><td>48.52</td><td>24.31</td><td>14.65</td><td>9.24</td><td>24.18</td><td>30.63</td></tr>
<tr><td colspan="12">Ours</td></tr>
<tr><td>Audio-Reasoner</td><td>72.89</td><td>54.17</td><td>42.46</td><td>33.95</td><td>50.87</td><td>56.50</td><td>29.99</td><td>18.37</td><td>11.62</td><td>29.13</td><td>40.00</td></tr>
</table>
", + "image_path": "05c185e32b1c6a4fde3834169001890bcc75c06534623c92190fb89b5e332d73.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 384, + 506, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 506, + 430 + ], + "type": "text", + "content": "Performance on CoVoST 2. The CoVoST 2 dataset in Table 7 evaluates speech-to-text translation, a fundamental task in cross-lingual speech understanding. Audio-Reasoner demonstrates the strengths of Audio-Reasoner in speech-to-text translation across both English-to-Chinese (EN-ZN) and Chinese-to-English (ZN-EN) tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 434, + 506, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 533 + ], + "type": "text", + "content": "For EN-ZN translation, Audio-Reasoner outperforms both closed-source Gemini-1.5-pro and open-source Qwen2-Audio-Instruct. With an average BLEU score of 50.87, it surpasses Gemini-1.5-pro's score of 46.24 by 4.63 points and Qwen2-Audio-Instruct's 37.07 by a significant 13.80 points. Audio-Reasoner's BLEU-4 score of 33.95 highlights its ability to generate fluent, high-quality translations, even for more complex sentence structures. In the ZN-EN translation task, Audio-Reasoner continues to show superiority with an average BLEU score of 29.13, outperforming Gemini-1.5-pro (with a score of 26.39) by 2.74 points and Qwen2-Audio-Instruct (with a score of 24.18) by 4.95 points. Its BLEU-4 score of 11.62 reflects an enhanced ability to produce coherent and accurate translations, particularly in more challenging, longer sentences." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 537, + 504, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 504, + 561 + ], + "type": "text", + "content": "These results demonstrate that Audio-Reasoner excels in both capturing cross-lingual semantic alignment, consistently outperforming existing models in speech-to-text translation tasks." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 208, + 586, + 399, + 645 + ], + "blocks": [ + { + "bbox": [ + 105, + 569, + 503, + 582 + ], + "lines": [ + { + "bbox": [ + 105, + 569, + 503, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 503, + 582 + ], + "type": "text", + "content": "Table 8: Performance comparison of the speech emotion recognition (SER) task on MELD dataset." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 208, + 586, + 399, + 645 + ], + "lines": [ + { + "bbox": [ + 208, + 586, + 399, + 645 + ], + "spans": [ + { + "bbox": [ + 208, + 586, + 399, + 645 + ], + "type": "table", + "html": "
<table>
<tr><th>Model</th><th>Unweighted_ACC</th></tr>
<tr><td>EMO-box</td><td>31.5</td></tr>
<tr><td>SALMONN</td><td>39.2</td></tr>
<tr><td>Qwen2-Audio-Instruct</td><td>49.9</td></tr>
<tr><td>Audio-Reasoner</td><td>53.9</td></tr>
</table>
", + "image_path": "6ddeec26dc0b46ed82c9b7d248215cee84c766c54b928e445ab9205f13c4bba5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 655, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 723 + ], + "type": "text", + "content": "Performance on MELD. The MELD dataset in Table 8 evaluates speech emotion recognition, a crucial task for understanding affective states in spoken language. Audio-Reasoner achieves the highest unweighted accuracy of 53.9, outperforming all baseline models, including the previous state-of-the-art Qwen2-Audio-Instruct (49.9) by 4.0 percentage points. The results further validate that Audio-Reasoner excels in emotion perception and contextual audio understanding, solidifying its effectiveness in real-world speech emotion recognition tasks." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 81, + 132, + 100 + ], + "blocks": [ + { + "bbox": [ + 116, + 81, + 132, + 100 + ], + "lines": [ + { + "bbox": [ + 116, + 81, + 132, + 100 + ], + "spans": [ + { + "bbox": [ + 116, + 81, + 132, + 100 + ], + "type": "image", + "image_path": "9322ca54bc2fde2f6f2c9939c1e41fb674388081d73e82a1630c156cc78330a8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 336, + 449, + 348 + ], + "lines": [ + { + "bbox": [ + 159, + 336, + 449, + 348 + ], + "spans": [ + { + "bbox": [ + 159, + 336, + 449, + 348 + ], + "type": "text", + "content": "Figure 5: Example of Audio-Reasoner answering music-based question." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 143, + 83, + 465, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 83, + 465, + 100 + ], + "spans": [ + { + "bbox": [ + 143, + 83, + 465, + 100 + ], + "type": "text", + "content": "Analyze the interplay between the guitar riff and the spoken commentary. How do they contribute to the overall impression or potential purpose of the piece?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 145, + 102, + 203, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 102, + 203, + 114 + ], + "spans": [ + { + "bbox": [ + 145, + 102, + 203, + 114 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 477, + 118, + 494, + 143 + ], + "blocks": [ + { + "bbox": [ + 477, + 118, + 494, + 143 + ], + "lines": [ + { + "bbox": [ + 477, + 118, + 494, + 143 + ], + "spans": [ + { + "bbox": [ + 477, + 118, + 494, + 143 + ], + "type": "image", + "image_path": "2037f12e5111931657463be8d6a3644c6f01c10ec8a126d8d0536b94ae5f6035.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 124, + 124, + 150, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 124, + 150, + 131 + ], + "spans": [ + { + "bbox": [ + 124, + 124, + 150, + 131 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 132, + 465, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 132, + 465, + 150 + ], + "spans": [ + { + "bbox": [ + 129, + 132, + 465, + 150 + ], + "type": "text", + "content": " The question asks about interplay and contribution. Analyze the music clip, focusing on: Characterizing the guitar riff: style, rhythm, melodic features. Characterizing the spoken commentary: tone, content, frequency. Analyzing their relationship: explain, praise, interrupt. Inferring the overall impression and purpose: instruction, demonstration, etc.? " + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 155, + 468, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 155, + 468, + 169 + ], + "spans": [ + { + "bbox": [ + 129, + 155, + 468, + 169 + ], + "type": "text", + "content": " The audio has a blues - rock guitar riff on a clean - toned electric guitar, moderate tempo, 4/4 time. There's male spoken commentary, informal and relaxed, suggesting instruction or informal performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 173, + 216, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 173, + 216, + 180 + ], + "spans": [ + { + "bbox": [ + 129, + 173, + 216, + 180 + ], + "type": "text", + "content": " Consider scenarios:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 129, + 180, + 211, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 180, + 211, + 186 + ], + "spans": [ + { + "bbox": [ + 129, + 180, + 211, + 186 + ], + "type": "text", + "content": "**Scenario 1: Instructional Context" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 129, + 186, + 461, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 186, + 461, + 198 + ], + "spans": [ + { + "bbox": [ + 129, + 186, + 461, + 198 + ], + "type": "text", + "content": "If commentary explains riff aspects (e.g., \"bend on third string\", \"E minor blues progression\"), it's instructional. Guitar demonstrates, commentary clarifies. If it gives positive reinforcement (\"very cool riff, man\"), it's a casual lesson or performance with teaching." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 199, + 249, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 199, + 249, + 206 + ], + "spans": [ + { + "bbox": [ + 129, + 199, + 249, + 206 + ], + "type": "text", + "content": "**Scenario 2: Informal Performance/Demonstration" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 129, + 206, + 466, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 206, + 466, + 219 + ], + "spans": [ + { + "bbox": [ + 129, + 206, + 466, + 219 + ], + "type": "text", + "content": "If commentary is minimal (reactions like \"yeah\", \"alright\"), it's an informal performance. Riffs simplicity and repetition suggest a demonstration for less - experienced musicians." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 220, + 469, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 220, + 469, + 234 + ], + "spans": [ + { + "bbox": [ + 129, + 220, + 469, + 234 + ], + "type": "text", + "content": "The audio leans towards scenario 1. \"very cool riff, man\" implies positive assessment for teaching or demonstration. Clean tone and moderate pace suggest instruction/demonstration, not a raw blues performance." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 129, + 234, + 424, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 234, + 424, + 240 + ], + "spans": [ + { + "bbox": [ + 129, + 234, + 424, + 240 + ], + "type": "text", + "content": "Refining: E minor blues with E - B chord progression and C# implies commentary helps demonstrate their use. " + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 129, + 247, + 462, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 247, + 462, + 266 + ], + "spans": [ + { + "bbox": [ + 129, + 247, + 462, + 266 + ], + "type": "text", + "content": " The blues - rock, moderate - tempo guitar riff is complemented by relaxed, informal spoken commentary. The interplay indicates a purpose of instruction or demonstration of the riff's construction. The positive comment supports an informal teaching or relaxed demonstration." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 124, + 267, + 152, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 267, + 152, + 274 + ], + "spans": [ + { + "bbox": [ + 124, + 267, + 152, + 274 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 124, + 275, + 165, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 275, + 165, + 283 + ], + "spans": [ + { + "bbox": [ + 124, + 275, + 165, + 283 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 283, + 468, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 283, + 468, + 303 + ], + "spans": [ + { + "bbox": [ + 129, + 283, + 468, + 303 + ], + "type": "text", + "content": "The interplay creates an impression of informal instruction or demonstration. The clean - toned guitar plays a moderate - tempo blues - rock riff, and the male voice gives relaxed commentary like \"very cool riff, man\". The riff's simplicity, moderate pace, and E minor blues elements suggest the purpose is to demonstrate the riff and how to play its features, not a formal performance or raw blues." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 124, + 304, + 167, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 304, + 167, + 311 + ], + "spans": [ + { + "bbox": [ + 124, + 304, + 167, + 311 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 378, + 506, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 533 + ], + "type": "text", + "content": "Why Audio-Reasoner Excels. While open-source models typically lag behind closed-source alternatives, Audio-Reasoner narrows this gap significantly. It outperforms Gemini-1.5-Pro in multiple categories and achieves competitive results against GPT-4o. A key factor driving Audio-Reasoner's state-of-the-art performance is the CoTA dataset, which provides an unparalleled foundation for comprehensive, structured, and context-aware audio reasoning. The dataset contributes to Audio-Reasoner's success in two critical ways: (1) CoTA's comprehensive audio reasoning data. Unlike models trained on fragmented or task-specific datasets, Audio-Reasoner benefits from CoTA's extensive and balanced coverage of sound, speech, and music-based reasoning. This ensures robust adaptation across diverse audio tasks, enhancing its ability to generalize across multimodal benchmarks. (2) CoTA's strong logical and contextual inference. CoTA's emphasis on structured reasoning and contextual awareness enables Audio-Reasoner to outperform existing models in long-form reasoning (MMAU-mini) and conversational audio understanding (AIR-Bench chat). By leveraging deep logical inference and nuanced context retention, it surpasses prior approaches in handling complex speech interactions and multimodal reasoning." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 547, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 547, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 547, + 504, + 581 + ], + "type": "text", + "content": "These strengths position Audio-Reasoner as a breakthrough in open-source audio intelligence, setting a new benchmark for multimodal reasoning in speech comprehension, translation, and real-world audio interpretation." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 599, + 178, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 599, + 178, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 178, + 611 + ], + "type": "text", + "content": "4.3 Case Study" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 618, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 618, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 618, + 506, + 696 + ], + "type": "text", + "content": "Here we show a case in Figure 5, demonstrating the audio-based reasoning capability of Audio-Reasoner. The system analyzes the interplay between a guitar riff and a spoken commentary, systematically breaking down their characteristics and relationship. It identifies key musical features, assesses the commentary's tone and intent, and infers the overall purpose of the piece. By considering different scenarios, Audio-Reasoner determines that the interplay suggests an informal instructional or demonstrative context rather than a formal performance. 
This example highlights the model's ability to extract meaningful insights from audio, combining musical analysis with contextual interpretation." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 711, + 324, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 711, + 324, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 324, + 723 + ], + "type": "text", + "content": "For more examples, please refer to Sec. D." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 185, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 185, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 185, + 84 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 90, + 506, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 90, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 90, + 506, + 157 + ], + "type": "text", + "content": "In this work, we introduced Audio-Reasoner, a large audio language model (LALM) designed to advance deep reasoning in audio-based tasks. By leveraging inference scaling and structured chain-of-thought (CoT) reasoning, we demonstrated significant performance improvements across key benchmarks. Central to our approach is CoTA, a large-scale, high-quality dataset containing around 1.2 million structured reasoning samples, which we generated through a systematic pipeline of annotation refinement, question synthesis, and CoT generation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "text", + "content": "Our experimental results highlight the effectiveness of structured reasoning in the audio domain, with state-of-the-art performance on MMAU-mini " + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "inline_equation", + "content": "(+25.42\\%)" + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "text", + "content": ", CoVoST 2 " + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "inline_equation", + "content": "(+8.31\\%)" + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "text", + "content": ", and MELD " + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "inline_equation", + "content": "(+8.01\\%)" + }, + { + "bbox": [ + 104, + 161, + 506, + 217 + ], + "type": "text", + "content": ". These findings underscore the critical role of reasoning-rich datasets and inference scaling in multimodal learning, particularly for audio-based tasks where existing models struggle with complex reasoning."
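The percentage gains quoted in this conclusion and in the abstract are relative improvements over the strongest prior baseline in each table. A quick sanity check from the reported absolute scores (small last-digit differences stem from rounding of the published numbers):

```python
# Recompute the headline relative gains from the absolute scores reported
# in Tables 4, 5, 6, and 8 (baseline = strongest prior model in each case).
def rel_gain(ours: float, baseline: float) -> float:
    return 100.0 * (ours - baseline) / baseline

print(f"MMAU-mini: {rel_gain(61.71, 49.20):+.2f}%")  # ~ +25.4%, reported +25.42%
print(f"AIR chat:  {rel_gain(7.94, 6.93):+.2f}%")    # ~ +14.6%, reported +14.57%
print(f"AIR found: {rel_gain(65.2, 59.2):+.2f}%")    # ~ +10.1%, reported +10.13%
print(f"MELD SER:  {rel_gain(53.9, 49.9):+.2f}%")    # ~ +8.0%,  reported +8.01%
```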
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 221, + 506, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 506, + 288 + ], + "type": "text", + "content": "Moving forward, we believe that structured CoT methodologies will play a crucial role in enhancing the reasoning capabilities of LALMs. Future research directions include extending Audio-Reasoner's capabilities to multi-turn dialogue reasoning, cross-modal knowledge integration, and real-world applications in speech-driven AI systems. By publicly releasing our model, dataset, and code, we aim to foster further innovation and collaboration in the field of audio reasoning, paving the way for more advanced and interpretable LALMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 298, + 165, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 298, + 165, + 310 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 165, + 310 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 316, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 316, + 507, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 316, + 507, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 507, + 350 + ], + "type": "text", + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 357, + 505, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 357, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 357, + 505, + 402 + ], + "type": "text", + "content": "Cameron B Browne, Edward Powley, Daniel Whitehouse, Simon M Lucas, Peter I Cowling, Philipp Rohlfshagen, Stephen Tavener, Diego Perez, Spyridon Samothrakis, and Simon Colton. A survey of monte carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in Games (T-CIAIG), (1):1-43, 2012." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 410, + 505, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 505, + 444 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 505, + 444 + ], + "type": "text", + "content": "Santiago Castro, Devamanyu Hazarika, Verónica Pérez-Rosas, Roger Zimmermann, Rada Mihalcea, and Soujanya Poria. Towards multimodal sarcasm detection. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 4619-4629, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 451, + 504, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 451, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 106, + 451, + 504, + 485 + ], + "type": "text", + "content": "Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 492, + 506, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 492, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 107, + 492, + 506, + 525 + ], + "type": "text", + "content": "Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, et al. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 534, + 506, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 534, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 107, + 534, + 506, + 567 + ], + "type": "text", + "content": "Yingqian Cui, Pengfei He, Xianfeng Tang, Qi He, Chen Luo, Jiliang Tang, and Yue Xing. A theoretical understanding of chain-of-thought: Coherent reasoning and error-aware demonstration. arXiv preprint arXiv:2410.16540, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 575, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 575, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 107, + 575, + 506, + 609 + ], + "type": "text", + "content": "Alexandre Defossez, Laurent Mazare, Manu Orsini, Amelie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 616, + 504, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 504, + 640 + ], + "type": "text", + "content": "Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 647, + 506, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 506, + 680 + ], + "type": "text", + "content": "Konstantinos Drossos, Samuel Lipping, and Tuomas Virtanen. Clotho: An audio captioning dataset. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 736-740, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 506, + 723 + ], + "type": "text", + "content": "Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. arXiv preprint arXiv:2412.10117, 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "Logan Engstrom, Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Firdaus Janoos, Larry Rudolph, and Aleksander Madry. Implementation matters in deep policy gradients: A case study onppo and trpo. arXiv preprint arXiv:2005.12729, 2020." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "text", + "content": "Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong Li Lee, and Wynne Hsu. Video-of-thought: step-by-step video reasoning from perception to cognition. In International Conference on Machine Learning (ICML), pages 13109-13125, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 154, + 504, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 154, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 107, + 154, + 504, + 189 + ], + "type": "text", + "content": "Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 506, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 506, + 239 + ], + "type": "text", + "content": "Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 247, + 506, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 506, + 293 + ], + "type": "text", + "content": "Sreyan Ghosh, Sonal Kumar, Ashish Seth, Chandra Kiran Reddy Evuru, Utkarsh Tyagi, S Sakshi, Oriol Nieto, Ramani Duraiswami, and Dinesh Manocha. Gama: A large audio-language model with advanced audio understanding and complex reasoning abilities. In Empirical Methods in Natural Language Processing, pages 6288-6313, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 298, + 506, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 506, + 334 + ], + "type": "text", + "content": "Yuan Gong, Alexander H Liu, Hongyin Luo, Leonid Karlinsky, and James Glass. Joint audio and speech understanding. In Automatic Speech Recognition and Understanding Workshop (ASRU), pages 1-8, 2023a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 340, + 504, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 340, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 107, + 340, + 504, + 363 + ], + "type": "text", + "content": "Yuan Gong, Hongyin Luo, Alexander H Liu, Leonid Karlinsky, and James Glass. Listen, think, and understand. arXiv preprint arXiv:2305.10790, 2023b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 370, + 505, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 370, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 107, + 370, + 505, + 404 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 411, + 505, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 411, + 505, + 446 + ], + "spans": [ + { + "bbox": [ + 107, + 411, + 505, + 446 + ], + "type": "text", + "content": "Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. Mammoth-vl: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 453, + 505, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 453, + 505, + 487 + ], + "spans": [ + { + "bbox": [ + 107, + 453, + 505, + 487 + ], + "type": "text", + "content": "Songhao Han, Wei Huang, Hairong Shi, Le Zhuo, Xiu Su, Shifeng Zhang, Xu Zhou, Xiaojuan Qi, Yue Liao, and Si Liu. Videospresso: A large-scale chain-of-thought dataset for fine-grained video reasoning via core frame selection. arXiv preprint arXiv:2411.14794, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 493, + 505, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 493, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 107, + 493, + 505, + 528 + ], + "type": "text", + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 535, + 506, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 535, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 107, + 535, + 506, + 568 + ], + "type": "text", + "content": "Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 575, + 505, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 575, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 107, + 575, + 505, + 609 + ], + "type": "text", + "content": "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 616, + 504, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 504, + 641 + ], + "type": "text", + "content": "Feihu Jin, Yifan Liu, and Ying Tan. Zero-shot chain-of-thought reasoning guided by evolutionary algorithms in large language models. arXiv preprint arXiv:2402.05376, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 681 + ], + "type": "text", + "content": "Chris Dongjoo Kim, Byeongchang Kim, Hyunmin Lee, and Gunhee Kim. Audiocaps: Generating captions for audios in the wild. In *Nations of the Americas* Chapter of the Association for Computational Linguistics (NAACL), pages 119-132, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "text", + "content": "Zhifeng Kong, Arushi Goel, Rohan Badlani, Wei Ping, Rafael Valle, and Bryan Catanzaro. Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities. In International Conference on Machine Learning (ICML), pages 25125-25148, 2024." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 114, + 505, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 114, + 505, + 149 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 505, + 149 + ], + "type": "text", + "content": "Shansong Liu, Atin Sakkeer Hussain, Chenshuo Sun, and Ying Shan. Music understanding llama: Advancing text-to-music generation with question answering and captioning. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 286-290, 2024b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 157, + 505, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 157, + 505, + 191 + ], + "spans": [ + { + "bbox": [ + 106, + 157, + 505, + 191 + ], + "type": "text", + "content": "Ziyang Ma, Mingjie Chen, Hezhao Zhang, Zhisheng Zheng, Wenxi Chen, Xiquan Li, Jiaxin Ye, Xie Chen, and Thomas Hain. Emobox: Multilingual multi-corpus speech emotion recognition toolkit and benchmark. arXiv preprint arXiv:2406.07162, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 199, + 505, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 199, + 505, + 233 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 233 + ], + "type": "text", + "content": "Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 241, + 505, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 241, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 241, + 505, + 285 + ], + "type": "text", + "content": "Jan Melechovsky, Zixun Guo, Deepanway Ghosal, Navonil Majumder, Dorien Herremans, and Soujanya Poria. Mustango: Toward controllable text-to-music generation. In *Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL)*, pages 8286-8309, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 295, + 505, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 295, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 295, + 505, + 329 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 337, + 505, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 337, + 505, + 372 + ], + "spans": [ + { + "bbox": [ + 106, + 337, + 505, + 372 + ], + "type": "text", + "content": "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5206-5210, 2015." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 380, + 505, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 380, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 380, + 505, + 415 + ], + "type": "text", + "content": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 311-318, 2002." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 422, + 505, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 422, + 505, + 457 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 505, + 457 + ], + "type": "text", + "content": "Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. Meld: A multimodal multi-party dataset for emotion recognition in conversations. 
In Annual Meeting of the Association for Computational Linguistics (ACL), pages 527-536, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 464, + 505, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 464, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 106, + 464, + 505, + 499 + ], + "type": "text", + "content": "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, et al. Toolllm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 507, + 505, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 507, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 505, + 541 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International Conference on Machine Learning (ICML), pages 28492-28518, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 548, + 505, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 548, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 106, + 548, + 505, + 584 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Advances in Neural Information Processing Systems (NeurIPS), pages 53728-53741, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 592, + 505, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 592, + 505, + 637 + ], + "spans": [ + { + "bbox": [ + 106, + 592, + 505, + 637 + ], + "type": "text", + "content": "S Sakshi, Utkarsh Tyagi, Sonal Kumar, Ashish Seth, Ramaneswaran Selvakumar, Oriol Nieto, Ramani Duraiswami, Sreyan Ghosh, and Dinesh Manocha. Mmau: A massive multi-task audio understanding and reasoning benchmark. In International Conference on Learning Representations (ICLR), 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 645, + 505, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 645, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 645, + 505, + 680 + ], + "type": "text", + "content": "Hao Shao, Shengju Qian, Han Xiao, Guanglu Song, Zhuofan Zong, Letian Wang, Yu Liu, and Hongsheng Li. Visual cot: Unleashing chain-of-thought reasoning in multi-modal language models. arXiv preprint arXiv:2403.16999, 2024a." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024b." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 116 + ], + "type": "text", + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 124, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 124, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 506, + 158 + ], + "type": "text", + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In Advances in Neural Information Processing Systems (NeurIPS), pages 29106-29141, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 163, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 504, + 198 + ], + "type": "text", + "content": "Yixuan Su, Tian Lan, Huayang Li, Jialu Xu, Yan Wang, and Deng Cai. Pandagpt: One model to instruction-follow them all. In Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants (TLLM), pages 11-23, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 205, + 504, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 504, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 504, + 238 + ], + "type": "text", + "content": "Changli Tang, Wenyi Yu, Guangzhi Sun, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, and Chao Zhang. *Salmon: Towards generic hearing abilities for large language models.* arXiv preprint arXiv:2310.13289, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 245, + 506, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 245, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 506, + 279 + ], + "type": "text", + "content": "Yunlong Tang, Gen Zhan, Li Yang, Yiting Liao, and Chenliang Xu. Cardiff: Video salient object ranking chain of thought reasoning for saliency prediction with diffusion. arXiv preprint arXiv:2408.12009, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 285, + 504, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 285, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 106, + 285, + 504, + 320 + ], + "type": "text", + "content": "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. 
arXiv preprint arXiv:2403.05530, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 327, + 504, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 327, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 327, + 504, + 361 + ], + "type": "text", + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 367, + 504, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 367, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 504, + 401 + ], + "type": "text", + "content": "Miles Turpin, Julian Michael, Ethan Perez, and Samuel Bowman. Language models don't always say what they think: Unfaithful explanations in chain-of-thought prompting. In Advances in Neural Information Processing Systems (NeurIPS), pages 74952-74965, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 407, + 506, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 407, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 506, + 441 + ], + "type": "text", + "content": "Changhan Wang, Anne Wu, Jiatao Gu, and Juan Pino. Covost 2 and massively multilingual speech translation. In Conference of the International Speech Communication Association (Interspeech), pages 2247-2251, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 448, + 504, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 448, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 504, + 482 + ], + "type": "text", + "content": "Chen Wang, Minpeng Liao, Zhongqiang Huang, Jinliang Lu, Junhong Wu, Yuchen Liu, Chengqing Zong, and Jiajun Zhang. Blsp: Bootstrapping language-speech pre-training via behavior alignment of continuation writing. arXiv preprint arXiv:2309.00916, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 489, + 504, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 489, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 504, + 522 + ], + "type": "text", + "content": "Yan Wang, Yawen Zeng, Jingsheng Zheng, Xiaofen Xing, Jin Xu, and Xiangmin Xu. Videocot: A video chain-of-thought dataset with active annotation tool. In Workshop on Advances in Language and Vision Research (ALVR), pages 92-101, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 529, + 504, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 529, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 504, + 563 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 24824-24837, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 570, + 506, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 570, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 106, + 570, + 506, + 604 + ], + "type": "text", + "content": "Peng Wen, Teng-Gen Hu, Robert J Linhardt, Sen-Tai Liao, Hong Wu, and Yu-Xiao Zou. 
Mulberry: A review of bioactive compounds and advanced processing technology. Trends in food science & technology, 83:138-158, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 610, + 504, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 610, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 504, + 634 + ], + "type": "text", + "content": "Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. In International Conference on Machine Learning (ICML), pages 53366-53397, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 640, + 504, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 640, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 640, + 504, + 663 + ], + "type": "text", + "content": "Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024a." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 670, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 670, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 670, + 504, + 693 + ], + "type": "text", + "content": "Zhifei Xie and Changqiao Wu. Mini-omni2: Towards open-source gpt-4o with vision, speech and duplex capabilities. arXiv preprint arXiv:2410.11190, 2024b." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "type": "text", + "content": "Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 573 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "text", + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024a." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 158 + ], + "type": "text", + "content": "Qian Yang, Jin Xu, Wenrui Liu, Yunfei Chu, Ziyue Jiang, Xiaohuan Zhou, Yichong Leng, Yuanjun Lv, Zhou Zhao, Chang Zhou, et al. Air-bench: Benchmarking large audio-language models via generative comprehension. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 1979-1998, 2024b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 163, + 506, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 506, + 199 + ], + "type": "text", + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 11809-11822, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 205, + 504, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 504, + 239 + ], + "type": "text", + "content": "Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. Token-level direct preference optimization. In International Conference on Machine Learning (ICML), pages 58348-58365, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 245, + 506, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 245, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 506, + 280 + ], + "type": "text", + "content": "Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. arXiv preprint arXiv:2305.11000, 2023a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 286, + 506, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 286, + 506, + 320 + ], + "spans": [ + { + "bbox": [ + 106, + 286, + 506, + 320 + ], + "type": "text", + "content": "Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. In Empirical Methods in Natural Language Processing (EMNLP), pages 15757-15773, 2023b." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 327, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 327, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 327, + 506, + 361 + ], + "type": "text", + "content": "Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 367, + 506, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 367, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 107, + 367, + 506, + 392 + ], + "type": "text", + "content": "Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 397, + 504, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 397, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 504, + 421 + ], + "type": "text", + "content": "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola. Automatic chain of thought prompting in large language models. arXiv preprint arXiv:2210.03493, 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 427, + 506, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 427, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 506, + 461 + ], + "type": "text", + "content": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-ol: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 468, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 468, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 106, + 468, + 506, + 502 + ], + "type": "text", + "content": "Yuze Zhao, Jintao Huang, Jinghan Hu, Xingjun Wang, Yunlin Mao, Daoze Zhang, Zeyinzi Jiang, Zhikai Wu, Baole Ai, Ang Wang, et al. Swift: a scalable lightweight infrastructure for fine-tuning. arXiv preprint arXiv:2408.05517, 2024b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 509, + 504, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 509, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 106, + 509, + 504, + 542 + ], + "type": "text", + "content": "Qiji Zhou, Ruochen Zhou, Zike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. arXiv preprint arXiv:2405.13872, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 548, + 504, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 548, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 106, + 548, + 504, + 573 + ], + "type": "text", + "content": "Anni Zou, Zhuosheng Zhang, Hai Zhao, and Xiangru Tang. Generalizable chain-of-thought prompting in mixed-task scenarios with large language models. arXiv preprint arXiv:2310.06692, 2023." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 207, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 207, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 207, + 85 + ], + "type": "text", + "content": "A Prompt Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 90, + 506, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 90, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 90, + 506, + 255 + ], + "type": "text", + "content": "A universally applicable method for writing prompts involves three key components: a clear task definition, a structured example, and a precise format specification. Our prompt adheres to this methodology by first defining the task explicitly, outlining the need for detailed audio descriptions and progressively challenging questions. It then provides a structured example that demonstrates the expected output format, ensuring clarity and minimizing ambiguity. Lastly, it specifies the exact formatting rules using delimiters such as ... and ..., ensuring consistency in responses. 
This approach guarantees efficiency by eliminating interpretative variance, allowing for precise and reproducible outputs. When drafting this prompt, we adhered to a structured approach to maximize clarity and effectiveness. The first-person perspective is used to emphasize our direct involvement in designing the task, ensuring the reader understands the rationale behind each structural choice. The structure follows a logical progression: we begin by introducing the general method, transition into an explanation of how our prompt aligns with this method, and conclude by justifying the approach's efficiency. By maintaining an academic tone, we reinforce the credibility and rigor of our prompt-writing methodology. We list some of the prompts used for tasks at different stages following below." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 262, + 411, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 411, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 411, + 274 + ], + "type": "text", + "content": "A.1 Prompt of Stage 1 when Processing Data (Sample from AudioSet)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 285, + 491, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 285, + 491, + 374 + ], + "spans": [ + { + "bbox": [ + 119, + 285, + 491, + 374 + ], + "type": "text", + "content": "We are annotating some audio and designing some questions. You are an excellent audio analyst. Next, you will receive an audio and one absolutely correct but simple description. Your task is to first generate a more detailed, in-depth and absolutely correct new description based on the given descriptions. Then, use this description to generate three open-ended or single-choice questions with four options along with their answers. Please separate different parts using ...\n...\n...\n\n...\n...." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 119, + 383, + 491, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 383, + 491, + 462 + ], + "spans": [ + { + "bbox": [ + 119, + 383, + 491, + 462 + ], + "type": "text", + "content": "Here is a sample. Please strictly follow the format in the sample. The audio presents a sustained, high-frequency static noise, characteristic of a detuned or malfunctioning electronic device, likely a television or radio...Describe the characteristics of the static noise in the audio, and how these characteristics change over time. What...? " + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 470, + 332, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 470, + 332, + 481 + ], + "spans": [ + { + "bbox": [ + 120, + 470, + 332, + 481 + ], + "type": "text", + "content": "Here is the original description: *** label here ***." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 482, + 194, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 482, + 194, + 492 + ], + "spans": [ + { + "bbox": [ + 121, + 482, + 194, + 492 + ], + "type": "text", + "content": "Here is the audio." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 493, + 490, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 493, + 490, + 536 + ], + "spans": [ + { + "bbox": [ + 121, + 493, + 490, + 536 + ], + "type": "text", + "content": "Please design three questions that gradually become more challenging, starting from basic factual questions, but don't deviate from the content of the audio itself. If it's a single-choice question, please give four options like (a) one, (b) two, .... and the answer should be analyzed and end with a format like \"the answer is B.\"" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 411, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 411, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 411, + 85 + ], + "type": "text", + "content": "A.2 Prompt of Stage 2 when Processing Data (Sample from AudioSet)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 95, + 489, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 95, + 489, + 272 + ], + "spans": [ + { + "bbox": [ + 121, + 95, + 489, + 272 + ], + "type": "text", + "content": "We are now designing a system to generate structured audio-based chain-of-thought reasoning data. You will receive an audio clip, its textual description, as well as a question and its answer. Your task is to explore in more detail the thinking process from the question to the answer. Your chain of thought will be divided into two main parts, the and sections. In the section, you need to go through four steps: planning, captioning, reasoning, and summarizing. The section is invisible to the user. Therefore, in the section, you need to base on all the reasoning processes and results in the section and provide a final reply based on the question. Finally, your reply should strictly follow the following format: (In this part, analyze the user's needs and how to complete the task. If the problem is complex, it should be done step by step) (In this part, conduct an overall analysis of the given audio input content, try to find all the parts related to the question, describe them, and ensure it is completely correct.) (Start reasoning towards the final answer here) (Draw appropriate results based on the reasoning part here) Give the final answer here referring to the part Please strictly follow the format of the sample." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 281, + 157, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 281, + 157, + 292 + ], + "spans": [ + { + "bbox": [ + 121, + 281, + 157, + 292 + ], + "type": "text", + "content": "Sample:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 293, + 165, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 293, + 165, + 303 + ], + "spans": [ + { + "bbox": [ + 121, + 293, + 165, + 303 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 304, + 185, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 304, + 185, + 313 + ], + "spans": [ + { + "bbox": [ + 121, + 304, + 185, + 313 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 314, + 465, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 314, + 465, + 325 + ], + "spans": [ + { + "bbox": [ + 121, + 314, + 465, + 325 + ], + "type": "text", + "content": "The user wants to understand the dynamic changes within the provided audio clip ...." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 326, + 187, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 326, + 187, + 335 + ], + "spans": [ + { + "bbox": [ + 121, + 326, + 187, + 335 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 336, + 177, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 336, + 177, + 346 + ], + "spans": [ + { + "bbox": [ + 121, + 336, + 177, + 346 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 346, + 488, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 346, + 488, + 369 + ], + "spans": [ + { + "bbox": [ + 121, + 346, + 488, + 369 + ], + "type": "text", + "content": "The audio clip predominantly features static noise. .... similar to that of a detuned television or a device failing to receive a signal." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 369, + 180, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 369, + 180, + 379 + ], + "spans": [ + { + "bbox": [ + 121, + 369, + 180, + 379 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 380, + 191, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 380, + 191, + 389 + ], + "spans": [ + { + "bbox": [ + 121, + 380, + 191, + 389 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 391, + 488, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 391, + 488, + 434 + ], + "spans": [ + { + "bbox": [ + 121, + 391, + 488, + 434 + ], + "type": "text", + "content": "1. Identify changes in Intensity (Volume): The audio's static noise does not remain at a constant volume. There are noticeable increases and decreases in loudness throughout the clip. .... is struggling to maintain a consistent output, adding to the impression of something malfunctioning or broken." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 434, + 194, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 434, + 194, + 444 + ], + "spans": [ + { + "bbox": [ + 121, + 434, + 194, + 444 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 445, + 185, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 445, + 185, + 455 + ], + "spans": [ + { + "bbox": [ + 121, + 445, + 185, + 455 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 456, + 488, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 456, + 488, + 478 + ], + "spans": [ + { + "bbox": [ + 121, + 456, + 488, + 478 + ], + "type": "text", + "content": "The static noise in the audio is highly dynamic. .... leading to a sense of disorder and instability." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 479, + 187, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 479, + 187, + 488 + ], + "spans": [ + { + "bbox": [ + 121, + 479, + 187, + 488 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 489, + 168, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 489, + 168, + 498 + ], + "spans": [ + { + "bbox": [ + 121, + 489, + 168, + 498 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 121, + 500, + 184, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 500, + 184, + 510 + ], + "spans": [ + { + "bbox": [ + 121, + 500, + 184, + 510 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 121, + 510, + 488, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 510, + 488, + 533 + ], + "spans": [ + { + "bbox": [ + 121, + 510, + 488, + 533 + ], + "type": "text", + "content": "The audio presents a static noise, .... The overall effect is one of energetic chaos, preventing any possibility of calm or predictability." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 121, + 533, + 186, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 533, + 186, + 542 + ], + "spans": [ + { + "bbox": [ + 121, + 533, + 186, + 542 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 121, + 554, + 489, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 554, + 489, + 630 + ], + "spans": [ + { + "bbox": [ + 121, + 554, + 489, + 630 + ], + "type": "text", + "content": "Note that you have both the question and the answer because it is necessary to ensure the correctness of the chain of thought. However, in your response, you can only refer to the content of the question and the audio, and lead to the answer. You must absolutely not assume that you already know the answer. Please provide a detailed and flexible response with high-quality logic in both the caption and reasoning sections. If the reasoning part requires complex logic, you can even propose several different approaches and try them one by one. Here is the original description: *** caption here ***." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 121, + 631, + 283, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 631, + 283, + 641 + ], + "spans": [ + { + "bbox": [ + 121, + 631, + 283, + 641 + ], + "type": "text", + "content": "The question is: *** question here ***." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 121, + 642, + 331, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 642, + 331, + 652 + ], + "spans": [ + { + "bbox": [ + 121, + 642, + 331, + 652 + ], + "type": "text", + "content": "The answer you can refer to : *** answer here ***." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 121, + 653, + 489, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 653, + 489, + 685 + ], + "spans": [ + { + "bbox": [ + 121, + 653, + 489, + 685 + ], + "type": "text", + "content": "Again, don't mention that you have the answer and the description because they are only here to help you to design the chain of thought but should not exist in the real-world scenario, either in the think or response sections." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 411, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 411, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 411, + 84 + ], + "type": "text", + "content": "A.3 Prompt of Stage 3 when Processing Data (Sample from AudioSet)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 120, + 95, + 491, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 95, + 491, + 149 + ], + "spans": [ + { + "bbox": [ + 120, + 95, + 491, + 149 + ], + "type": "text", + "content": "We are data reviewers. Next, you will receive an audio clip, along with its description, questions, answers, and most importantly, the thought process for solving the problems. Please determine and analyze whether all of these elements are completely correct, especially check if there are any hallucinations in the thought process. Return if there are no issues, and if there are errors in the data." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 150, + 359, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 150, + 359, + 162 + ], + "spans": [ + { + "bbox": [ + 121, + 150, + 359, + 162 + ], + "type": "text", + "content": "Here is the description of the audio: *** caption here ***." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 162, + 302, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 162, + 302, + 172 + ], + "spans": [ + { + "bbox": [ + 121, + 162, + 302, + 172 + ], + "type": "text", + "content": "Here is the question: *** question here ***." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 172, + 292, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 172, + 292, + 182 + ], + "spans": [ + { + "bbox": [ + 121, + 172, + 292, + 182 + ], + "type": "text", + "content": "Here is the answer: *** answer here ***." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 183, + 369, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 183, + 369, + 194 + ], + "spans": [ + { + "bbox": [ + 121, + 183, + 369, + 194 + ], + "type": "text", + "content": "And here is the thought process: *** COT process here ***." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 194, + 488, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 194, + 488, + 216 + ], + "spans": [ + { + "bbox": [ + 121, + 194, + 488, + 216 + ], + "type": "text", + "content": "Please conduct a thorough judgment and analysis and provide the result in the specified format." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 233, + 309, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 309, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 309, + 247 + ], + "type": "text", + "content": "B Synthetic Data Generation Pipeline" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 252, + 252, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 252, + 252, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 252, + 264 + ], + "type": "text", + "content": "B.1 Synthetic Data Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 266, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 506, + 365 + ], + "type": "text", + "content": "Multi-Speaker Dataset: To enhance the model's ability to comprehend complex, multi-turn conversations among multiple speakers, we constructed the Multi-Speaker dataset using text-to-speech (TTS) technology. The dataset generation process consists of three steps: (1) Conversation Text Generation: We utilized commercial models to generate diverse multi-speaker conversation texts covering a wide range of scenarios. (2) Speech Synthesis: Leveraging all available timbres from LibriSpeech [Panayotov et al., 2015] as prompts, we employed the CosyVoice2 [Du et al., 2024] framework to synthesize high-quality speech samples. (3) Dataset Assembly: The synthesized speech samples, fearring distinct timbres, were carefully combined to create a rich and diverse multi-speaker conversation dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 370, + 504, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 370, + 504, + 393 + ], + "spans": [ + { + "bbox": [ + 104, + 370, + 504, + 393 + ], + "type": "text", + "content": "This approach ensures that the dataset effectively trains the model to navigate intricate speaker interactions in natural conversations." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 398, + 504, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 398, + 504, + 497 + ], + "spans": [ + { + "bbox": [ + 104, + 398, + 504, + 497 + ], + "type": "text", + "content": "Complex Audio Dataset: To strengthen the model's ability to process intricate audio compositions, we developed the Complex Audio dataset. The audio materials are sourced from AudioSet, AudioCaps, and our internal material library. The dataset consists of two primary types of audio data: (1) Temporally Complex Audio: Short audio clips were sequentially arranged, requiring the model to perform tasks such as sorting or counting based on temporal relationships. 
We sequentially arranged different short audio clips and required the model to perform sorting or counting tasks. (2) Composite Complex Audio: Multiple long-form audio tracks—such as engine sounds and musical instruments—were blended. The model was then tasked with extracting specific target sounds from the composite audio, enhancing its ability to analyze overlapping audio sources." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 504, + 525 + ], + "type": "text", + "content": "This dataset ensures that the model is well-equipped to handle real-world audio challenges, such as distinguishing between multiple concurrent sounds and identifying temporal patterns." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 528, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 504, + 552 + ], + "type": "text", + "content": "To better understand what Multi-Speaker dataset and Complex Audio dataset are respectively tasked with, we provide some examples in Sec. B.2, Sec. B.3." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 291, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 291, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 291, + 84 + ], + "type": "text", + "content": "B.2 Sample from Complex Audio Dataset" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 120, + 95, + 185, + 117 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 120, + 95, + 166, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 95, + 166, + 106 + ], + "spans": [ + { + "bbox": [ + 120, + 95, + 166, + 106 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 106, + 185, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 106, + 185, + 117 + ], + "spans": [ + { + "bbox": [ + 120, + 106, + 185, + 117 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 119, + 118, + 489, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 118, + 489, + 160 + ], + "spans": [ + { + "bbox": [ + 119, + 118, + 489, + 160 + ], + "type": "text", + "content": "The question is about identifying the sounds in the audio and arranging them in the correct order. I will carefully listen to the audio multiple times, noting down the sounds as I recognize them. Then, I will compare the identified sound sequence with the provided answer options to choose the one that matches best." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 161, + 188, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 161, + 188, + 171 + ], + "spans": [ + { + "bbox": [ + 120, + 161, + 188, + 171 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 172, + 490, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 172, + 490, + 215 + ], + "spans": [ + { + "bbox": [ + 120, + 172, + 490, + 215 + ], + "type": "text", + "content": "Upon initial listening, I can discern sounds that seem like a shattering noise, an animal sound which I think sounds like a cow, a cat sound, and what sounds like some mechanical tools working, perhaps a chainsaw. I shall listen again to clarify the sounds and their order." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 120, + 216, + 180, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 216, + 180, + 225 + ], + "spans": [ + { + "bbox": [ + 120, + 216, + 180, + 225 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 226, + 440, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 226, + 440, + 238 + ], + "spans": [ + { + "bbox": [ + 120, + 226, + 440, + 238 + ], + "type": "text", + "content": "1. After relistening, the first sound is definitely glass breaking." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 238, + 488, + 379 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 120, + 238, + 317, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 238, + 317, + 249 + ], + "spans": [ + { + "bbox": [ + 120, + 238, + 317, + 249 + ], + "type": "text", + "content": "2. The second sound is definitely a cow moing." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 249, + 269, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 249, + 269, + 259 + ], + "spans": [ + { + "bbox": [ + 121, + 249, + 269, + 259 + ], + "type": "text", + "content": "3. The third sound is a cat meowing." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 259, + 260, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 259, + 260, + 270 + ], + "spans": [ + { + "bbox": [ + 121, + 259, + 260, + 270 + ], + "type": "text", + "content": "4. The fourth sound is a chainsaw." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 270, + 470, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 270, + 470, + 281 + ], + "spans": [ + { + "bbox": [ + 121, + 270, + 470, + 281 + ], + "type": "text", + "content": "5. I compare this sequence (glass break, cow, cat, chainsaw) with the options provided." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 281, + 297, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 281, + 297, + 292 + ], + "spans": [ + { + "bbox": [ + 121, + 281, + 297, + 292 + ], + "type": "text", + "content": "6. Option (a) matches the order I identified." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 293, + 194, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 293, + 194, + 302 + ], + "spans": [ + { + "bbox": [ + 121, + 293, + 194, + 302 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 120, + 303, + 488, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 303, + 488, + 325 + ], + "spans": [ + { + "bbox": [ + 120, + 303, + 488, + 325 + ], + "type": "text", + "content": "Based on the detailed listening and analysis, the correct order of sounds in the audio is: glass break, cow, cat, chainsaw. This matches option (a)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 120, + 325, + 187, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 325, + 187, + 335 + ], + "spans": [ + { + "bbox": [ + 120, + 325, + 187, + 335 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 120, + 335, + 167, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 335, + 167, + 346 + ], + "spans": [ + { + "bbox": [ + 120, + 335, + 167, + 346 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 120, + 346, + 184, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 346, + 184, + 357 + ], + "spans": [ + { + "bbox": [ + 120, + 346, + 184, + 357 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 120, + 357, + 188, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 357, + 188, + 368 + ], + "spans": [ + { + "bbox": [ + 120, + 357, + 188, + 368 + ], + "type": "text", + "content": "The answer is a." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 120, + 369, + 187, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 369, + 187, + 379 + ], + "spans": [ + { + "bbox": [ + 120, + 369, + 187, + 379 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 287, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 287, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 287, + 84 + ], + "type": "text", + "content": "B.3 Sample from Multi-Speaker Dataset" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 120, + 95, + 166, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 95, + 166, + 106 + ], + "spans": [ + { + "bbox": [ + 120, + 95, + 166, + 106 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 106, + 185, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 106, + 185, + 117 + ], + "spans": [ + { + "bbox": [ + 120, + 106, + 185, + 117 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 118, + 489, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 118, + 489, + 161 + ], + "spans": [ + { + "bbox": [ + 119, + 118, + 489, + 161 + ], + "type": "text", + "content": "The user asks me to identify the number of speakers in the audio clip. To do this, I will listen carefully and try to distinguish unique voices. The conversation is between two hikers, I need to confirm this by identifying their individual lines and vocal characteristics. Finally, I will provide the answer based on the number of identified speakers." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 162, + 188, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 162, + 188, + 171 + ], + "spans": [ + { + "bbox": [ + 120, + 162, + 188, + 171 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 172, + 178, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 172, + 178, + 182 + ], + "spans": [ + { + "bbox": [ + 120, + 172, + 178, + 182 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 183, + 389, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 183, + 389, + 194 + ], + "spans": [ + { + "bbox": [ + 120, + 183, + 389, + 194 + ], + "type": "text", + "content": "The audio presents a conversation between two individuals hiking." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 194, + 490, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 194, + 490, + 270 + ], + "spans": [ + { + "bbox": [ + 119, + 194, + 490, + 270 + ], + "type": "text", + "content": "Speaker 1: This person starts by commenting on the intensity of the climb and expresses doubt about continuing without a break. Later, after a reminder about the ranger's warning, this speaker agrees to turn back. 
The vocal tone suggests exertion and a desire to reach the summit, eventually giving way to agreement. Phrases spoken: \"Wow, this climb is intense. I am not sure I can go much further without a serious break,\" \"Turn back. But we're so close to the summit. I really want to make it, you know,\" \"Unpredictable mountain weather, okay. Okay, you are right.\"" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 270, + 491, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 270, + 491, + 335 + ], + "spans": [ + { + "bbox": [ + 119, + 270, + 491, + 335 + ], + "type": "text", + "content": "Speaker 2: This person responds to the first speaker's concerns, suggesting they turn back due to the darkening sky and diminishing daylight. They emphasize safety and remind the other speaker about the ranger's warning. The vocal tone conveys concern and prioritizes safety. Phrases spoken: \"I know right. I am starting to think we should turn back. That sky looks awfully dark, and we're losing daylight quickly,\" \"I hear you, but safety first. Remember what the ranger said about the unpredictable mountain weather,\" \"So, turn back.\"" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 119, + 335, + 490, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 335, + 490, + 357 + ], + "spans": [ + { + "bbox": [ + 119, + 335, + 490, + 357 + ], + "type": "text", + "content": "Based on the clear distinction in vocal tones and the back-and-forth nature of the dialogue, I can confirm that there are two distinct speakers in this audio clip." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 358, + 180, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 358, + 180, + 368 + ], + "spans": [ + { + "bbox": [ + 120, + 358, + 180, + 368 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 369, + 191, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 369, + 191, + 378 + ], + "spans": [ + { + "bbox": [ + 120, + 369, + 191, + 378 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 379, + 490, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 379, + 490, + 411 + ], + "spans": [ + { + "bbox": [ + 119, + 379, + 490, + 411 + ], + "type": "text", + "content": "Initial Assessment: The audio starts with two distinct voices in a hiking - related dialogue. The voices are clearly distinguishable from the get - go, with one having a deeper timbre and the other a higher - pitched one." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 119, + 412, + 490, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 412, + 490, + 455 + ], + "spans": [ + { + "bbox": [ + 119, + 412, + 490, + 455 + ], + "type": "text", + "content": "Dialogue Analysis: A close look at the content shows a clear difference in the speakers' stances. One is eager to press on to the summit, saying, \"We're almost there. Let's keep going.\" The other is more cautious, citing the darkening sky and fading daylight, stating, \"The weather looks bad. 
We should turn back.\"" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 119, + 456, + 490, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 456, + 490, + 489 + ], + "spans": [ + { + "bbox": [ + 119, + 456, + 490, + 489 + ], + "type": "text", + "content": "Vocal Tone Differentiation: Each speaker's vocal tone stays consistent. The eager speaker has an energetic and determined tone, with a rising pitch when advocating for continuing. The cautious speaker has a measured, worried tone, with a lower pitch when warning of danger." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 119, + 489, + 490, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 489, + 490, + 499 + ], + "spans": [ + { + "bbox": [ + 119, + 489, + 490, + 499 + ], + "type": "text", + "content": "Confirming Speaker Count: A review of the audio shows no extra voices or interruptions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 119, + 500, + 490, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 500, + 490, + 510 + ], + "spans": [ + { + "bbox": [ + 119, + 500, + 490, + 510 + ], + "type": "text", + "content": "The conversation moves smoothly between the two speakers, indicating only two are present." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 119, + 511, + 455, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 511, + 455, + 521 + ], + "spans": [ + { + "bbox": [ + 119, + 511, + 455, + 521 + ], + "type": "text", + "content": "Final Answer: Based on this analysis, there are two individuals in the conversation." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 120, + 521, + 211, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 521, + 211, + 531 + ], + "spans": [ + { + "bbox": [ + 120, + 521, + 211, + 531 + ], + "type": "text", + "content": "Thus, the answer is B." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 120, + 532, + 194, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 532, + 194, + 542 + ], + "spans": [ + { + "bbox": [ + 120, + 532, + 194, + 542 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 120, + 543, + 185, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 543, + 185, + 553 + ], + "spans": [ + { + "bbox": [ + 120, + 543, + 185, + 553 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 119, + 554, + 490, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 554, + 490, + 565 + ], + "spans": [ + { + "bbox": [ + 119, + 554, + 490, + 565 + ], + "type": "text", + "content": "Based on the dialogue and vocal tone analysis, there are two distinct speakers in the audio." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 565, + 190, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 565, + 190, + 575 + ], + "spans": [ + { + "bbox": [ + 120, + 565, + 190, + 575 + ], + "type": "text", + "content": "The answer is B." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 120, + 576, + 188, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 576, + 188, + 586 + ], + "spans": [ + { + "bbox": [ + 120, + 576, + 188, + 586 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 120, + 586, + 168, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 586, + 168, + 597 + ], + "spans": [ + { + "bbox": [ + 120, + 586, + 168, + 597 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 120, + 597, + 184, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 597, + 184, + 608 + ], + "spans": [ + { + "bbox": [ + 120, + 597, + 184, + 608 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 119, + 609, + 490, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 609, + 490, + 630 + ], + "spans": [ + { + "bbox": [ + 119, + 609, + 490, + 630 + ], + "type": "text", + "content": "Based on the dialogue and distinct vocal tones present in the audio, I have identified two individuals engaged in the conversation. Therefore, the answer is B." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 120, + 631, + 187, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 631, + 187, + 640 + ], + "spans": [ + { + "bbox": [ + 120, + 631, + 187, + 640 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 97, + 504, + 274 + ], + "blocks": [ + { + "bbox": [ + 105, + 71, + 258, + 85 + ], + "lines": [ + { + "bbox": [ + 105, + 71, + 258, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 258, + 85 + ], + "type": "text", + "content": "C Further Dataset Analysis" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 97, + 504, + 274 + ], + "lines": [ + { + "bbox": [ + 106, + 97, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 106, + 97, + 504, + 274 + ], + "type": "image", + "image_path": "2c7cf6efc88d700632067417e48e5a03b2a49288065af2afae5640b0f1376a7a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 281, + 504, + 305 + ], + "lines": [ + { + "bbox": [ + 104, + 281, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 504, + 305 + ], + "type": "text", + "content": "Figure 6: Detailed information of the CoTA dataset: The maximum, minimum, and average number of words in the answers, as well as the average length of the audio." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 316, + 504, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 382 + ], + "type": "text", + "content": "CoTA's reasoning adaptability is also evident in its distribution of token lengths and audio duration across tasks in Figure 6. 
The dataset accommodates a broad range of reasoning complexities, with token lengths spanning 327 (CoVoST2) to 1,454 (Multi-Speaker), ensuring coverage of both concise and highly intricate reasoning processes. Notably, tasks requiring deep logical inference, such as complex audio, exhibit a well-balanced token distribution " + }, + { + "bbox": [ + 104, + 316, + 504, + 382 + ], + "type": "inline_equation", + "content": "(\\max = 423, \\text{avg} = 192.96)" + }, + { + "bbox": [ + 104, + 316, + 504, + 382 + ], + "type": "text", + "content": ", supporting structured multi-step reasoning without unnecessary redundancy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 386, + 506, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 386, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 506, + 475 + ], + "type": "text", + "content": "Further, the dataset's average token lengths (164.48-481.57) align with task difficulty: longer reasoning chains characterize tasks such as sound description (AudioSet: 395.26) and music understanding (MusicBench: 463.89). Meanwhile, CoTA ensures practical generalization by maintaining audio durations between 2.85s and 26.34s, where shorter clips (e.g., MELD: 2.84s) support concise context-dependent reasoning, while extended sequences (Multi-Speaker: 26.34s) enable complex multi-turn inference. This systematic variation in reasoning depth and audio granularity ensures adaptability across diverse tasks, addressing the limitation of one-size-fits-all reasoning chains in existing audio datasets." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 225, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 225, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 225, + 83 + ], + "type": "text", + "content": "D More Case Studies" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 121, + 113, + 147, + 138 + ], + "blocks": [ + { + "bbox": [ + 121, + 113, + 147, + 138 + ], + "lines": [ + { + "bbox": [ + 121, + 113, + 147, + 138 + ], + "spans": [ + { + "bbox": [ + 121, + 113, + 147, + 138 + ], + "type": "image", + "image_path": "9b43c9b6c9f74919ef542d987cdc96e3d1e58021911f412304ed512d5557ae1d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 567, + 450, + 578 + ], + "lines": [ + { + "bbox": [ + 159, + 567, + 450, + 578 + ], + "spans": [ + { + "bbox": [ + 159, + 567, + 450, + 578 + ], + "type": "text", + "content": "Figure 7: Example of Audio-Reasoner answering music-based question." 
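Before turning to the case studies, a brief note on the Appendix C statistics above (Figure 6): the per-task maximum, minimum, and average answer lengths and the average audio duration are simple aggregations. A minimal sketch of how they might be recomputed from CoTA records follows; the record schema and field names ("task", "answer", "duration_s") are assumptions for illustration, not the dataset's actual format.

```python
from collections import defaultdict

def cota_stats(records):
    """Per-task answer-length and audio-duration aggregates (assumed schema)."""
    by_task = defaultdict(list)
    for rec in records:
        # Whitespace word count stands in for the paper's token count.
        by_task[rec["task"]].append((len(rec["answer"].split()), rec["duration_s"]))
    stats = {}
    for task, pairs in by_task.items():
        lengths = [n for n, _ in pairs]
        durations = [d for _, d in pairs]
        stats[task] = {
            "max_len": max(lengths),
            "min_len": min(lengths),
            "avg_len": sum(lengths) / len(lengths),
            "avg_duration_s": sum(durations) / len(durations),
        }
    return stats
```

On such aggregates, contrasts like MELD's short clips (2.84 s on average) versus Multi-Speaker's 26.34 s fall out directly.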
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 126, + 140, + 144, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 140, + 144, + 148 + ], + "spans": [ + { + "bbox": [ + 126, + 140, + 144, + 148 + ], + "type": "text", + "content": "User" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 175, + 118, + 455, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 118, + 455, + 159 + ], + "spans": [ + { + "bbox": [ + 175, + 118, + 455, + 159 + ], + "type": "text", + "content": "Analyze the interplay between the vocal sample, synth elements, and percussion within the track. How do these elements contribute to the overall texture and dynamic progression of the piece, particularly focusing on the shift from the intro to the main rhythm?" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 176, + 160, + 282, + 179 + ], + "blocks": [ + { + "bbox": [ + 176, + 160, + 282, + 179 + ], + "lines": [ + { + "bbox": [ + 176, + 160, + 282, + 179 + ], + "spans": [ + { + "bbox": [ + 176, + 160, + 282, + 179 + ], + "type": "image", + "image_path": "858369a50834266a93f34f3916a01464cba0e42a19f40a500f1166c517b2870b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 461, + 188, + 486, + 214 + ], + "blocks": [ + { + "bbox": [ + 461, + 188, + 486, + 214 + ], + "lines": [ + { + "bbox": [ + 461, + 188, + 486, + 214 + ], + "spans": [ + { + "bbox": [ + 461, + 188, + 486, + 214 + ], + "type": "image", + "image_path": "ba4834b047ef995baaa14b47b6dfb094678968914445d6cc380fd50ae8d1ba44.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 457, + 216, + 492, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 457, + 216, + 492, + 232 + ], + "spans": [ + { + "bbox": [ + 457, + 216, + 492, + 232 + ], + "type": "text", + "content": "Audio Reasoner" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 145, + 211, + 177, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 211, + 177, + 218 + ], + "spans": [ + { + "bbox": [ + 145, + 211, + 177, + 218 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 165, + 219, + 441, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 219, + 441, + 262 + ], + "spans": [ + { + "bbox": [ + 165, + 219, + 441, + 262 + ], + "type": "text", + "content": "The user is asking us to identify potential influences or subgenres within rock music based on the provided instrumental track's instrumentation, chord progression, and overall sonic character. I will start with analyzing the chord progression, instrumentation, and overall sonic character of the music track. Then I will use these analyses to deduce potential influences and subgenres." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 165, + 270, + 441, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 270, + 441, + 316 + ], + "spans": [ + { + "bbox": [ + 165, + 270, + 441, + 316 + ], + "type": "text", + "content": "The music features a rock-oriented instrumental piece. The lead melody is carried by an electric guitar, featuring a slight overdrive or distortion. Acoustic rhythm guitars provide harmonic support, panned to the left and right. A slap-style bass guitar and acoustic drums form the rhythm section. 
The song follows a medium tempo in 4/4 time, with a chord progression of Bb, Ab6, F, and Ab6." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 165, + 323, + 441, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 323, + 441, + 419 + ], + "spans": [ + { + "bbox": [ + 165, + 323, + 441, + 419 + ], + "type": "text", + "content": " The chord progression, Bb, Ab6, F, Ab6, has blues rock similarities because it includes flat 7 chords. Electric guitar with overdrive or distortion, a driving rhythm section (bass and drums), and acoustic rhythm guitars are characteristics of rock music. Slap bass is used in funk music. Given these considerations, several potential influences and subgenres present themselves. The blues and rock elements of the chord progression could indicate the influence of blues rock and hard rock, which often incorporate blues-based progressions with louder, more aggressive instrumentation. The slap bassline suggests a funk influence, which can be found in funk rock and other rock subgenres that incorporate funk elements. The overall sonic character is most indicative of rock, with potential cross-pollination from blues and funk." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 165, + 428, + 438, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 428, + 438, + 463 + ], + "spans": [ + { + "bbox": [ + 165, + 428, + 438, + 463 + ], + "type": "text", + "content": "Based on the chord progression of Bb, Ab6, F, Ab6, the prominent electric guitar solo with overdrive, the acoustic rhythm guitars, the slap-style bass, and the acoustic drums, the music has potential influences or subgenres within rock music. These may include blues rock, hard rock, and funk rock." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 145, + 464, + 179, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 464, + 179, + 472 + ], + "spans": [ + { + "bbox": [ + 145, + 464, + 179, + 472 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 145, + 473, + 196, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 473, + 196, + 480 + ], + "spans": [ + { + "bbox": [ + 145, + 473, + 196, + 480 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 165, + 481, + 443, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 481, + 443, + 523 + ], + "spans": [ + { + "bbox": [ + 165, + 481, + 443, + 523 + ], + "type": "text", + "content": "Considering the instrumentation, the chord progression (Bb, Ab6, F, Ab6), and the overall sonic character of the piece, this instrumental track may draw upon influences from several subgenres within rock music. The chord progression has blues and rock similarities, pointing towards blues-rock and hard-rock elements. Moreover, the slap bassline indicates the touch of funk." 
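The flat-7 reading above can be verified with basic pitch-class arithmetic; the following sketch is illustrative only and not part of the paper. In the key of Bb, an Ab root lies ten semitones above the tonic, i.e. a lowered seventh degree, which is the bVII move characteristic of blues and rock progressions.

```python
# Pitch classes on the twelve-tone circle (C = 0).
PITCH_CLASS = {"C": 0, "Db": 1, "D": 2, "Eb": 3, "E": 4, "F": 5,
               "Gb": 6, "G": 7, "Ab": 8, "A": 9, "Bb": 10, "B": 11}

def semitones_above_tonic(tonic: str, root: str) -> int:
    return (PITCH_CLASS[root] - PITCH_CLASS[tonic]) % 12

for root in ["Bb", "Ab", "F", "Ab"]:
    print(root, semitones_above_tonic("Bb", root))
# Bb 0 (I), Ab 10 (bVII), F 7 (V), Ab 10 (bVII)
```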
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 145, + 525, + 198, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 525, + 198, + 533 + ], + "spans": [ + { + "bbox": [ + 145, + 525, + 198, + 533 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 121, + 171, + 147, + 198 + ], + "blocks": [ + { + "bbox": [ + 121, + 171, + 147, + 198 + ], + "lines": [ + { + "bbox": [ + 121, + 171, + 147, + 198 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 147, + 198 + ], + "type": "image", + "image_path": "a36cb05d818d194a4e6d2b64a41064f13fcd8e3fecc09d832b319f34886fd1c0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "lines": [ + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "spans": [ + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "type": "text", + "content": "User" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 175, + 176, + 459, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 176, + 459, + 208 + ], + "spans": [ + { + "bbox": [ + 175, + 176, + 459, + 208 + ], + "type": "text", + "content": "Considering the sounds present in the audio, describe the likely environment in which the dog is located and detail the types of interactions or activities the dog might be engaged in." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 176, + 209, + 282, + 228 + ], + "blocks": [ + { + "bbox": [ + 176, + 209, + 282, + 228 + ], + "lines": [ + { + "bbox": [ + 176, + 209, + 282, + 228 + ], + "spans": [ + { + "bbox": [ + 176, + 209, + 282, + 228 + ], + "type": "image", + "image_path": "6656392ff0178a74a688308bf440905dfacb28c80a2e0b33f331b3730e202032.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 145, + 254, + 176, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 254, + 176, + 261 + ], + "spans": [ + { + "bbox": [ + 145, + 254, + 176, + 261 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 164, + 262, + 434, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 262, + 434, + 289 + ], + "spans": [ + { + "bbox": [ + 164, + 262, + 434, + 289 + ], + "type": "text", + "content": "The question asks about the dog's environment and activities based on the audio. I need to carefully analyze the audio to identify relevant sounds, infer the setting, and deduce potential interactions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 164, + 297, + 443, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 297, + 443, + 350 + ], + "spans": [ + { + "bbox": [ + 164, + 297, + 443, + 350 + ], + "type": "text", + "content": "The audio contains dog barks of varying intensity and pitch. There are also sounds suggesting movement, such as rustling or the distinct sounds of the dog moving on what sounds to be hard floor surfaces. 
These movement sounds occur both during and between the barking, with the barks themselves being the main focus of sound. The pitch of the bark changes at different points, from higher to lower and back again." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 165, + 358, + 219, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 358, + 219, + 365 + ], + "spans": [ + { + "bbox": [ + 165, + 358, + 219, + 365 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 164, + 367, + 441, + 454 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 164, + 367, + 433, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 367, + 433, + 402 + ], + "spans": [ + { + "bbox": [ + 164, + 367, + 433, + 402 + ], + "type": "text", + "content": "1. **Environment Inference:** The presence of hard floor surface sounds, coupled with the absence of specific outdoor environmental noises (e.g., wind, traffic, birdsong), leans towards an indoor location. The changing pitch and tone of barks show the arousal level is rapidly changing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 164, + 402, + 426, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 402, + 426, + 427 + ], + "spans": [ + { + "bbox": [ + 164, + 402, + 426, + 427 + ], + "type": "text", + "content": "2. **Activity Inference:** The barking, interspersed with the sound of movement, indicates the dog is active. The varied bark intensity could suggest excitement, playfulness, or even alert behavior in relation to other sounds." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 164, + 428, + 441, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 428, + 441, + 454 + ], + "spans": [ + { + "bbox": [ + 164, + 428, + 441, + 454 + ], + "type": "text", + "content": "3. **Interaction Possibilities:** Based on the sounds alone, it's difficult to pinpoint the exact interaction. It could be playing with a toy, reacting to someone entering the room, or engaging in a game." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 164, + 463, + 441, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 463, + 441, + 506 + ], + "spans": [ + { + "bbox": [ + 164, + 463, + 441, + 506 + ], + "type": "text", + "content": "\\nThe dog is likely indoors on a hard floor surface. The audio suggests the dog is actively moving and barking, possibly engaged in play or reacting to something in its immediate surroundings. The dog is probably reacting to a stimulus within its environment, resulting in a change in the dog's arousal and tone and pitch of barks." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 145, + 507, + 178, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 507, + 178, + 514 + ], + "spans": [ + { + "bbox": [ + 145, + 507, + 178, + 514 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 145, + 516, + 196, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 516, + 196, + 523 + ], + "spans": [ + { + "bbox": [ + 145, + 516, + 196, + 523 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 164, + 524, + 441, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 524, + 441, + 576 + ], + "spans": [ + { + "bbox": [ + 164, + 524, + 441, + 576 + ], + "type": "text", + "content": "Based on the audio, the dog is likely located in an indoor environment with hard floor surface sounds. The varied intensity and sound of the dog's barks, combined with the sound of its movement, indicate the dog is actively moving and vocalizing, and that the arousal level of the dog is actively changing. Given this information, the dog might be engaged in some activity or play, perhaps reacting to a toy or another person within the room." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 145, + 577, + 198, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 577, + 198, + 585 + ], + "spans": [ + { + "bbox": [ + 145, + 577, + 198, + 585 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 462, + 240, + 487, + 267 + ], + "blocks": [ + { + "bbox": [ + 159, + 626, + 450, + 639 + ], + "lines": [ + { + "bbox": [ + 159, + 626, + 450, + 639 + ], + "spans": [ + { + "bbox": [ + 159, + 626, + 450, + 639 + ], + "type": "text", + "content": "Figure 8: Example of Audio-Reasoner answering sound-based question." 
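These case studies all follow the same staged layout: a short planning step, a description of the audio, step-by-step reasoning, and a closing summary. The original stage tags were lost during PDF extraction and survive only as empty title blocks in this file. A rough reconstruction of the template follows; the tag names are our guess from the visible flow of each example, not the paper's verbatim markup.

```python
# Hypothetical reconstruction of the staged response format; tag names are
# inferred from the case studies, not confirmed against the paper's markup.
COTA_TEMPLATE = (
    "<PLANNING>{plan}</PLANNING>\n"
    "<CAPTION>{audio_description}</CAPTION>\n"
    "<REASONING>{steps}</REASONING>\n"
    "<SUMMARY>{answer}</SUMMARY>"
)

example = COTA_TEMPLATE.format(
    plan="Determine how many speakers are present.",
    audio_description="Two alternating voices with clearly distinct tones.",
    steps="1. Track turn-taking. 2. Compare vocal tone. 3. Check for extra voices.",
    answer="Two speakers; the answer is B.",
)
print(example)
```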
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 462, + 240, + 487, + 267 + ], + "lines": [ + { + "bbox": [ + 462, + 240, + 487, + 267 + ], + "spans": [ + { + "bbox": [ + 462, + 240, + 487, + 267 + ], + "type": "image", + "image_path": "aff642ef84ae66fc0319f623312be2998e8ccbd4665d511a6391ba6638e6705a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 457, + 268, + 492, + 285 + ], + "lines": [ + { + "bbox": [ + 457, + 268, + 492, + 285 + ], + "spans": [ + { + "bbox": [ + 457, + 268, + 492, + 285 + ], + "type": "text", + "content": "Audio Reasoner" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 121, + 171, + 148, + 198 + ], + "blocks": [ + { + "bbox": [ + 121, + 171, + 148, + 198 + ], + "lines": [ + { + "bbox": [ + 121, + 171, + 148, + 198 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 148, + 198 + ], + "type": "image", + "image_path": "250663b1591e6ae5dbf7df5a0b76757a799f877069a3d178ee425094da48c030.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "lines": [ + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "spans": [ + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "type": "text", + "content": "User" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 175, + 176, + 382, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 176, + 382, + 186 + ], + "spans": [ + { + "bbox": [ + 175, + 176, + 382, + 186 + ], + "type": "text", + "content": "What did the second person say about the rose bush?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 176, + 187, + 305, + 228 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 176, + 187, + 233, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 187, + 233, + 196 + ], + "spans": [ + { + "bbox": [ + 176, + 187, + 233, + 196 + ], + "type": "text", + "content": "(a) They like it." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 176, + 197, + 305, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 197, + 305, + 206 + ], + "spans": [ + { + "bbox": [ + 176, + 197, + 305, + 206 + ], + "type": "text", + "content": "(b) It's the bane of their existence." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 176, + 207, + 276, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 207, + 276, + 217 + ], + "spans": [ + { + "bbox": [ + 176, + 207, + 276, + 217 + ], + "type": "text", + "content": "(c) It needs more fertilizer." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 176, + 217, + 278, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 217, + 278, + 228 + ], + "spans": [ + { + "bbox": [ + 176, + 217, + 278, + 228 + ], + "type": "text", + "content": "(d) It provides good shade." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 176, + 229, + 282, + 247 + ], + "blocks": [ + { + "bbox": [ + 176, + 229, + 282, + 247 + ], + "lines": [ + { + "bbox": [ + 176, + 229, + 282, + 247 + ], + "spans": [ + { + "bbox": [ + 176, + 229, + 282, + 247 + ], + "type": "image", + "image_path": "18e4c56940d96a5d28d7df8cd5be561661e52ea20dcad7561fe0902a688b15f5.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 145, + 262, + 177, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 262, + 177, + 270 + ], + "spans": [ + { + "bbox": [ + 145, + 262, + 177, + 270 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 164, + 271, + 438, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 271, + 438, + 315 + ], + "spans": [ + { + "bbox": [ + 164, + 271, + 438, + 315 + ], + "type": "text", + "content": "The question asks about what the second person said about the rose bush. The audio presents a conversation between two neighbors, Amelia and Ben. I need to identify who the second speaker is and what they said about the rose bush. Then, I need to match what the second speaker said with the given options." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 164, + 323, + 436, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 323, + 436, + 341 + ], + "spans": [ + { + "bbox": [ + 164, + 323, + 436, + 341 + ], + "type": "text", + "content": "The audio presents a conversation between Amelia and Ben about the upcoming block party and a troublesome rose bush." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 164, + 341, + 439, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 341, + 439, + 376 + ], + "spans": [ + { + "bbox": [ + 164, + 341, + 439, + 376 + ], + "type": "text", + "content": "**Speaker 1 (Amelia)**: Asks Ben if he's attending the block party and mentions the rose bush causing him trouble. She says \"You mentioned weedies, that rose bush still causing you trouble, the one that keeps growing over the fence?\". She also mentions she has been scratched by it." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 164, + 376, + 439, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 376, + 439, + 419 + ], + "spans": [ + { + "bbox": [ + 164, + 376, + 439, + 419 + ], + "type": "text", + "content": "**Speaker 2 (Ben)**: Confirms he's attending the block party, entering his chili in the cook-off, and complains about the rose bush. He says \"That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble.\" and suggests chopping it down. " + }, + { + "bbox": [ + 164, + 376, + 439, + 419 + ], + "type": "inline_equation", + "content": "\\backslash" + }, + { + "bbox": [ + 164, + 376, + 439, + 419 + ], + "type": "text", + "content": "n\\`nTherefore, Ben is the second speaker." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 164, + 428, + 219, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 428, + 219, + 436 + ], + "spans": [ + { + "bbox": [ + 164, + 428, + 219, + 436 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 164, + 437, + 436, + 515 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 164, + 437, + 436, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 437, + 436, + 445 + ], + "spans": [ + { + "bbox": [ + 164, + 437, + 436, + 445 + ], + "type": "text", + "content": "1. **Identify the second speaker:** Based on the audio, Ben is the second speaker." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 164, + 446, + 436, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 446, + 436, + 472 + ], + "spans": [ + { + "bbox": [ + 164, + 446, + 436, + 472 + ], + "type": "text", + "content": "2. **Identify what Ben said about the rose bush:** Ben says \"That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble.\" and suggests chopping it down." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 164, + 472, + 315, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 472, + 315, + 480 + ], + "spans": [ + { + "bbox": [ + 164, + 472, + 315, + 480 + ], + "type": "text", + "content": "3. **Match Ben's statement with the options:**" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 164, + 481, + 419, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 481, + 419, + 489 + ], + "spans": [ + { + "bbox": [ + 164, + 481, + 419, + 489 + ], + "type": "text", + "content": "- (a) They like it. - This is incorrect because Ben clearly dislikes the rose bush." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 164, + 490, + 423, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 490, + 423, + 498 + ], + "spans": [ + { + "bbox": [ + 164, + 490, + 423, + 498 + ], + "type": "text", + "content": "- (b) It's the bane of their existence. - This aligns perfectly with Ben's statement." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 164, + 498, + 373, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 498, + 373, + 506 + ], + "spans": [ + { + "bbox": [ + 164, + 498, + 373, + 506 + ], + "type": "text", + "content": "- (c) It needs more fertilizer. - This is not mentioned in the audio." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 164, + 507, + 391, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 507, + 391, + 515 + ], + "spans": [ + { + "bbox": [ + 164, + 507, + 391, + 515 + ], + "type": "text", + "content": "- (d) It provides good shade. - This is also not mentioned in the audio." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 165, + 516, + 337, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 516, + 337, + 524 + ], + "spans": [ + { + "bbox": [ + 165, + 516, + 337, + 524 + ], + "type": "text", + "content": "Therefore, the correct answer is (b)." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 164, + 533, + 431, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 533, + 431, + 559 + ], + "spans": [ + { + "bbox": [ + 164, + 533, + 431, + 559 + ], + "type": "text", + "content": "\\nBased on the audio, the second speaker, Ben, describes the rose bush as \"the bane of my existence\" and complains about it scratching him. Therefore, the answer is (b). " + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 145, + 559, + 179, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 559, + 179, + 567 + ], + "spans": [ + { + "bbox": [ + 145, + 559, + 179, + 567 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 145, + 568, + 196, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 568, + 196, + 576 + ], + "spans": [ + { + "bbox": [ + 145, + 568, + 196, + 576 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 164, + 577, + 423, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 577, + 423, + 594 + ], + "spans": [ + { + "bbox": [ + 164, + 577, + 423, + 594 + ], + "type": "text", + "content": "The second person, Ben, explicitly states that the rose bush " + }, + { + "bbox": [ + 164, + 577, + 423, + 594 + ], + "type": "inline_equation", + "content": "\"is the bane of my existence\"" + }, + { + "bbox": [ + 164, + 577, + 423, + 594 + ], + "type": "text", + "content": ". Therefore, the answer is B." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 145, + 594, + 198, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 594, + 198, + 602 + ], + "spans": [ + { + "bbox": [ + 145, + 594, + 198, + 602 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 461, + 264, + 487, + 289 + ], + "blocks": [ + { + "bbox": [ + 128, + 626, + 480, + 639 + ], + "lines": [ + { + "bbox": [ + 128, + 626, + 480, + 639 + ], + "spans": [ + { + "bbox": [ + 128, + 626, + 480, + 639 + ], + "type": "text", + "content": "Figure 9: Example of Audio-Reasoner answering multi-speaker speech-based question." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 461, + 264, + 487, + 289 + ], + "lines": [ + { + "bbox": [ + 461, + 264, + 487, + 289 + ], + "spans": [ + { + "bbox": [ + 461, + 264, + 487, + 289 + ], + "type": "image", + "image_path": "e1fbcf9ad869804d415d3768a1bafdb4f20a43d182c0fe8e203fecf5c5920fde.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 461, + 291, + 485, + 298 + ], + "lines": [ + { + "bbox": [ + 461, + 291, + 485, + 298 + ], + "spans": [ + { + "bbox": [ + 461, + 291, + 485, + 298 + ], + "type": "text", + "content": "Audio" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 456, + 300, + 492, + 308 + ], + "lines": [ + { + "bbox": [ + 456, + 300, + 492, + 308 + ], + "spans": [ + { + "bbox": [ + 456, + 300, + 492, + 308 + ], + "type": "text", + "content": "Reasoner" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 121, + 171, + 147, + 198 + ], + "blocks": [ + { + "bbox": [ + 121, + 171, + 147, + 198 + ], + "lines": [ + { + "bbox": [ + 121, + 171, + 147, + 198 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 147, + 198 + ], + "type": "image", + "image_path": "bf0107d5c8c65914f99bf9fa3e65a54865c8b756def2b36ec09a1f74a1a04e7b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "lines": [ + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "spans": [ + { + "bbox": [ + 126, + 200, + 144, + 207 + ], + "type": "text", + "content": "User" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 175, + 176, + 434, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 176, + 434, + 187 + ], + "spans": [ + { + "bbox": [ + 175, + 176, + 434, + 187 + ], + "type": "text", + "content": "Given that you hear buzzing plastic, identify the background sound?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 176, + 187, + 297, + 228 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 176, + 187, + 297, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 187, + 297, + 197 + ], + "spans": [ + { + "bbox": [ + 176, + 187, + 297, + 197 + ], + "type": "text", + "content": "(a). A washing machine running" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 176, + 198, + 285, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 198, + 285, + 206 + ], + "spans": [ + { + "bbox": [ + 176, + 198, + 285, + 206 + ], + "type": "text", + "content": "(b). Rain falling on a window" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 176, + 208, + 250, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 208, + 250, + 217 + ], + "spans": [ + { + "bbox": [ + 176, + 208, + 250, + 217 + ], + "type": "text", + "content": "(c). 
A toilet flushing" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 176, + 218, + 296, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 218, + 296, + 228 + ], + "spans": [ + { + "bbox": [ + 176, + 218, + 296, + 228 + ], + "type": "text", + "content": "(d). Waves crashing on a shore" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 176, + 229, + 282, + 248 + ], + "blocks": [ + { + "bbox": [ + 176, + 229, + 282, + 248 + ], + "lines": [ + { + "bbox": [ + 176, + 229, + 282, + 248 + ], + "spans": [ + { + "bbox": [ + 176, + 229, + 282, + 248 + ], + "type": "image", + "image_path": "f73ca89ffd4040b5bfdf6ba30b94d0ae554820407dfdcdd29189fdf86508fba7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 145, + 262, + 177, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 262, + 177, + 270 + ], + "spans": [ + { + "bbox": [ + 145, + 262, + 177, + 270 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 165, + 271, + 212, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 271, + 212, + 278 + ], + "spans": [ + { + "bbox": [ + 165, + 271, + 212, + 278 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 164, + 280, + 442, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 280, + 442, + 323 + ], + "spans": [ + { + "bbox": [ + 164, + 280, + 442, + 323 + ], + "type": "text", + "content": "I will carefully and methodically approach the task of identifying the background sound in the audio clip. This will allow me to focus solely on the characteristics of the background noise. Once the background sound is isolated, I will analyze it in detail to determine its key qualities, such as its consistency, rhythm, and texture. Afterward, I will compare these characteristics to the provided answer options." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 164, + 332, + 439, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 332, + 439, + 384 + ], + "spans": [ + { + "bbox": [ + 164, + 332, + 439, + 384 + ], + "type": "text", + "content": " Upon my initial listening, I observed that the background sound possesses characteristics that suggest fluid movement. There is a continuous, almost rhythmic, sound with subtle gurgling and sloshing. This could indicate the presence of water or some other flowing liquid. As I continue to listen more carefully, the flow becomes even more evident, suggesting a consistent rush or stream-like sound. " + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 165, + 393, + 218, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 393, + 218, + 401 + ], + "spans": [ + { + "bbox": [ + 165, + 393, + 218, + 401 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 164, + 403, + 441, + 533 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 164, + 403, + 439, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 403, + 439, + 437 + ], + "spans": [ + { + "bbox": [ + 164, + 403, + 439, + 437 + ], + "type": "text", + "content": "1. **Sound Characteristics:** My first observation is that the background sound features a continuous, almost rhythmic quality, with faint gurgling noises. 
These characteristics often point to a fluid in motion. Upon further listening, I recognize that the sound might be created by a liquid flowing from one place to another." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 164, + 437, + 440, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 437, + 440, + 463 + ], + "spans": [ + { + "bbox": [ + 164, + 437, + 440, + 463 + ], + "type": "text", + "content": "2. **Detailed Listening:** I focus on the quieter sections of the sound, where the fluid dynamics become clearer. The consistent, almost rushing quality suggests the movement of water rather than irregular drops or splashes." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 164, + 463, + 441, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 463, + 441, + 506 + ], + "spans": [ + { + "bbox": [ + 164, + 463, + 441, + 506 + ], + "type": "text", + "content": "3. **Comparison to Options:** With this in mind, I now compare the sound characteristics to the provided answer options. - \"A toilet flushing,\" which often involves a steady rush of water with gurgling and sloshing as the water flows through pipes. This closely matches the consistent flow and gurgling noises I hear in the background." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 164, + 507, + 435, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 507, + 435, + 533 + ], + "spans": [ + { + "bbox": [ + 164, + 507, + 435, + 533 + ], + "type": "text", + "content": "4. **Conclusion:** Based on my analysis, the sound best aligns with the description of a toilet flushing, due to the nature of the consistent, rushing water and subtle gurgling sounds. " + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 145, + 533, + 178, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 533, + 178, + 540 + ], + "spans": [ + { + "bbox": [ + 145, + 533, + 178, + 540 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 145, + 541, + 196, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 541, + 196, + 549 + ], + "spans": [ + { + "bbox": [ + 145, + 541, + 196, + 549 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 164, + 551, + 439, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 551, + 439, + 594 + ], + "spans": [ + { + "bbox": [ + 164, + 551, + 439, + 594 + ], + "type": "text", + "content": "After listening to the audio carefully, isolating the background sound, and comparing it to the answer options, I concluded that the background sound most closely matches the sound of a toilet flushing. The key factors leading to this conclusion include the continuous rush of water and the gurgling quality, which are typical of a flushing toilet." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 145, + 594, + 197, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 594, + 197, + 602 + ], + "spans": [ + { + "bbox": [ + 145, + 594, + 197, + 602 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 461, + 264, + 487, + 289 + ], + "blocks": [ + { + "bbox": [ + 138, + 626, + 470, + 639 + ], + "lines": [ + { + "bbox": [ + 138, + 626, + 470, + 639 + ], + "spans": [ + { + "bbox": [ + 138, + 626, + 470, + 639 + ], + "type": "text", + "content": "Figure 10: Example of Audio-Reasoner answering complex-audio-based question." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 461, + 264, + 487, + 289 + ], + "lines": [ + { + "bbox": [ + 461, + 264, + 487, + 289 + ], + "spans": [ + { + "bbox": [ + 461, + 264, + 487, + 289 + ], + "type": "image", + "image_path": "02e9abd6638b2b63d0c5724c13130da9bc69a42b07a4d1726ec78a20919f111b.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 462, + 291, + 485, + 299 + ], + "lines": [ + { + "bbox": [ + 462, + 291, + 485, + 299 + ], + "spans": [ + { + "bbox": [ + 462, + 291, + 485, + 299 + ], + "type": "text", + "content": "Audio" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 457, + 300, + 492, + 308 + ], + "lines": [ + { + "bbox": [ + 457, + 300, + 492, + 308 + ], + "spans": [ + { + "bbox": [ + 457, + 300, + 492, + 308 + ], + "type": "text", + "content": "Reasoner" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_content_list.json b/data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..acca798b1f574beb86e5974d5672f2adf114caeb --- /dev/null +++ b/data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_content_list.json @@ -0,0 +1,4552 @@ +[ + { + "type": "text", + "text": "PROMPTCOT: Synthesizing Olympiad-level Problems for Mathematical Reasoning in Large Language Models", + "text_level": 1, + "bbox": [ + 127, + 89, + 870, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xueliang Zhao\\* Wei Wu\\* Jian Guan\\* Lingpeng Kong", + "bbox": [ + 213, + 149, + 784, + 168 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$\\star$ The University of Hong Kong $\\star$ Ant Group", + "bbox": [ + 307, + 170, + 690, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{xlzhao,lpk}@cs.hku.hk", + "bbox": [ + 386, + 187, + 611, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{wuwei19850318, jianguanthu}@gmail.com", + "bbox": [ + 307, + 203, + 690, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The ability of large 
language models to solve complex mathematical problems has progressed significantly, particularly for tasks requiring advanced reasoning. However, the scarcity of sufficiently challenging problems, particularly at the Olympiad level, hinders further advancements. In this work, we introduce PROMPTcot, a novel approach for automatically generating high-quality Olympiad-level math problems. The proposed method synthesizes complex problems based on mathematical concepts and the rationale behind problem construction, emulating the thought processes of experienced problem designers. We provide a theoretical analysis demonstrating that an optimal rationale should maximize both the likelihood of rationale generation given the associated concepts and the likelihood of problem generation conditioned on both the rationale and the concepts. Our method is evaluated on standard benchmarks including GSM8K, MATH-500, and AIME2024, where it consistently outperforms existing problem generation methods. Furthermore, we demonstrate that PROMPTcot exhibits superior data scalability, consistently maintaining high performance as the dataset size increases, outperforming the baselines. The implementation is available at https://github.com/zhaoxlpu/PromptCoT.", + "bbox": [ + 144, + 288, + 460, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 738, + 258, + 753 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\"In mathematics the art of proposing a question must be held of higher value than solving it.\"", + "bbox": [ + 149, + 764, + 450, + 810 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Georg Cantor", + "bbox": [ + 329, + 825, + 450, + 838 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in large language models (LLMs) have greatly enhanced their capability for", + "bbox": [ + 112, + 854, + 487, + 887 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "solving complex problems through planning and reasoning. Particularly in mathematics, strong reasoning models such as OpenAI o1 (Jaech et al., 2024) and DeepSeek r1 (Guo et al., 2025) have significantly pushed the boundaries of AI from mastering grade school problems (Yang et al., 2024b) to excelling at Olympiad-level challenges (Guo et al., 2025). The remarkable achievements have inspired the community to explore dedicating more computational resources to the inference stage. Consequently, the scaling paradigm of LLMs is shifting from training time to test time (Snell et al., 2024). While powerful reasoning LLMs, such as DeepSeek r1, have been open-sourced, the details of math problem acquisition remain obscured in the published literature (e.g., the technical report of DeepSeek r1), and such data is still kept private. Consequently, studies aiming to reproduce o1-like or r1-like performance have to rely on open-source math datasets (Li et al., 2024b), which are often constrained by scale and difficulty. This leaves an open research question $(RQ)$ : how can we obtain high-quality, sufficiently challenging math problems at scale? 
We emphasize that $RQ$ is crucial for test-time scaling research, as it serves as a prerequisite for effectively initiating the supervised fine-tuning (SFT) or reinforcement learning (RL) process.", + "bbox": [ + 507, + 261, + 884, + 711 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We investigate the automated generation of Olympiad-level math problems as a principled approach to addressing $RQ$ . Prior to our work, several projects have curated math datasets, typically involving problem synthesis procedures. Existing synthesis methods can be categorized into three frameworks: (1) direct generation through prompting powerful LLMs (Huang et al., 2024; Tang et al., 2024; Li et al., 2024b; Toshniwal et al., 2024), (2) mining from pretraining corpora (Yue et al., 2023; Li et al., 2024d), and (3) problem evolution, either through simple-to-simple rephrasing (Yu et al., 2023) or difficulty-based progression (Xu et al.,", + "bbox": [ + 507, + 712, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.02324v1 [cs.CL] 4 Mar 2025", + "bbox": [ + 21, + 310, + 60, + 717 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* This work was done during an internship at Ant Group.", + "bbox": [ + 134, + 892, + 485, + 906 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding authors.", + "bbox": [ + 136, + 906, + 295, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 494, + 942, + 502, + 954 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/b22c967d92425929a2a42d62c624f3702e85eddea995f28e46d6f3c9bf5c14dd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | Qwen2.5-Math-72B-Instruct Accuracy (↓) | DeepSeek-R1-Distill-Qwen-7B Avg. Reasoning Tokens (↑) | Micro Avg. Accuracy on MATH-500 and AIME2024: Fine-tuned Qwen2.5-Math-7B | Micro Avg. Accuracy on MATH-500 and AIME2024: Δ (↑)
AIME2024 | 30.0 | 4,159 | - | -
PROMPTcot (ours) | 48.9 | 6,502 | 80.8 | +1.2
OpenMathInstruct (Toshniwal et al., 2024) | 63.3 | 1,578 | 75.8 | -3.8
NuminaMath (Li et al., 2024b) | 60.9 | 3,039 | 76.6 | -3.0
Evol-Instruct (Luo et al., 2023) | 65.9 | 1,346 | 74.0 | -5.6
KPDDS (Huang et al., 2024) | 73.2 | 1,225 | 72.3 | -7.3
", + "bbox": [ + 112, + 80, + 882, + 175 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1: Difficulty and efficacy evaluation for different mathematical datasets. Accuracy: Performance of Qwen2.5-Math-72B-Instruct on the problems in different datasets. Note that we directly apply the model to solve the problems without any tuning, so the metric reflects difficulty of the problems for Qwen2.5-Math-72B-Instruct (↓: lower accuracy indicates higher difficulty). Avg. Reasoning Tokens: Average number of tokens in reasoning processes generated by DeepSeek-R1-Distill-Qwen-7B when processing the problems in different datasets (↑: larger numbers means DeepSeek-R1-Distill-Qwen-7B needs more tokens to complete reasoning, suggesting greater problem difficulty). Micro Avg. Accuracy on MATH-500 and AIME2024: Performance of Qwen2.5-Math-7B after fine-tuning on different datasets. Accuracy is computed as a weighted average over MATH-500 and AIME 2024. Note that fine-tuning is conducted using both the problems and their corresponding solutions. Further details are provided in Section 3. △: Difference in performance between Qwen2.5-Math-7B and Qwen2.5-Math-7B-Instruct (↑: larger margins suggests bigger contributions from the corresponding datasets). Note that no tuning is performed on Qwen2.5-Math-7B-Instruct, so its Micro Avg. Accuracy remains fixed at 79.6.", + "bbox": [ + 110, + 184, + 884, + 356 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2023; Luo et al., 2023). While these efforts progressively contribute to LLM reasoning, the problems provided by these methods now are not challenging enough for state-of-the-art models, making them less effective in further advancements. Table 1 provides a detailed analysis of problem difficulty in typical published works and open-source datasets, along with their efficacy in enhancing state-of-the-art LLMs. From this, we observe clear gaps in terms of difficulty between the problems in existing work and those in AIME, as well as the limited utility of these problems in further improving LLM performance.", + "bbox": [ + 110, + 380, + 489, + 589 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The primary challenge in synthesizing Olympiad-level math problems lies in their scarcity in existing corpora. As a result, the complex reasoning patterns required for such problems are undertrained, making it difficult for LLMs to assign sufficient probability to them during inference. More broadly, what we aim to explore is by nature a low-resource generation problem, which is prevalent across various applications yet inherently contrasts with the fundamental working mechanisms of LLMs. We focus on the mathematical domain, but our method can be easily adapted to other domains. Specifically, we propose PROMPTCOt, a novel problem generation method that synthesizes a complex math problem based on given math concepts (e.g., \"Prime Numbers\", c.f. Figure 1) and a rationale that emulates a veteran teacher's thought process when designing math problems for students. The idea is inspired by the success of LLMs in \"solving\" difficult", + "bbox": [ + 110, + 599, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "problems, where detailed reasoning procedures (e.g., chain-of-thoughts (Wei et al., 2022a)) have proven particularly effective. 
Through theoretical analysis, we show that an optimal rationale should simultaneously maximize the likelihood of rationale generation given the concepts and the likelihood of problem generation conditioned on both the rationale and the concepts. Based on this analysis, we implement PROMPTCoT by first querying an LLM to generate rationales for prepared Olympiad-level math problems and their associated concepts, and then fine-tuning an LLM as a problem generation model using a set of concept-rationale-problem triples.", + "bbox": [ + 505, + 380, + 884, + 605 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct extensive experiments on multiple benchmarks focused on mathematical reasoning, including GSM8K (Cobbe et al., 2021), MATH-500 (Lightman et al., 2023), and AIME2024 (AIME-2024). Evaluation results demonstrate that, compared to a range of existing generation methods and datasets, PROMPTCoT leads to more significant improvements on state-of-the-art LLMs after distillation in both short-CoT and long-CoT settings, achieving $0.4\\% - 4.8\\%$ absolute gains on MATH-500 and $6.7\\% - 20\\%$ absolute gains on AIME2024. An extended experiment further shows that PROMPTCoT exhibits remarkable advantages over the baseline method as the number of problems gradually increases, highlighting its superior data scalability.", + "bbox": [ + 507, + 611, + 885, + 868 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are three-fold: (1) We propose synthesizing Olympiad-level math problems to enhance LLM reasoning. While previous re", + "bbox": [ + 507, + 873, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 492, + 942, + 505, + 954 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4b8dae9c8a073f5f2d8a9395e0e8c8918cefb3fc1d1f3a4fedafce05ba53e6d2.jpg", + "image_caption": [ + "Figure 1: Overview of the proposed method. Left: Given an Olympiad problem and its foundational concepts (extracted using an LLM), the goal is to identify rationales that act as “amplifiers” to enhance the likelihood of generating the Olympiad problem (i.e., $p(x \\mid z, \\mathbf{c})$ ) while ensuring that these rationales can be easily derived from the foundational concepts (i.e., $p(z \\mid \\mathbf{c})$ ). Right: Once an appropriate rationale is found, we use the (foundational concepts, rationale, Olympiad problem) triple to train a model capable of predicting both the rationale and the Olympiad problem from any given set of concepts. The model can be further optimized through rejection sampling, where the quality of generated outputs is evaluated and used for subsequent model training." + ], + "image_footnote": [], + "bbox": [ + 193, + 85, + 803, + 233 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "search (Snell et al., 2024) has identified problem difficulty as a key factor in improving LLMs' reasoning capabilities, to the best of our knowledge, we are the first to formally pose difficult problem generation as a research question (i.e., $RQ$ ); (2) We propose PROMPTCoT as a principled approach to generating Olympiad-level math problems. To the best of our knowledge, we are the first to apply the chain-of-thought paradigm to the task of problem generation; and (3) We conduct extensive experiments to evaluate the efficacy of PROMPTCoT. Notably, PROMPTCoT is the only method that enables Qwen2.5-Math base models to surpass their Instruct versions. 
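That claim can be sanity-checked against the "Micro Avg." column defined in the Table 1 caption. Assuming the micro average weights MATH-500 and AIME2024 by their problem counts (500 and 30; the weighting is our assumption, though it reproduces the reported figure) and taking the PROMPTcot-Qwen-7B accuracies reported later in Table 3:

```python
# Micro average over MATH-500 (500 problems) and AIME2024 (30 problems).
math500_acc, aime2024_acc = 84.0, 26.7  # PROMPTcot-Qwen-7B, Table 3
micro = (500 * math500_acc + 30 * aime2024_acc) / (500 + 30)
print(round(micro, 1))  # 80.8, matching Table 1 and above the Instruct model's 79.6
```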
Furthermore, a 7B model distilled using the problems generated by PROMPTCoT is able to achieve performance comparable to state-of-the-art 32B models on mathematical reasoning tasks.", + "bbox": [ + 112, + 370, + 489, + 659 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Method", + "text_level": 1, + "bbox": [ + 112, + 671, + 218, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 1 provides an overview of PROMPTCoT. In summary, our method comprises three key steps: (1) Concept Extraction: Foundational concepts are extracted from seed prompts collected from Olympiad math sources ( $\\S 2.1$ ); (2) Rationale Generation: A rationale is inferred for each problem based on its associated concepts by maximizing both the probability of the rationale given the concepts and the probability of the problem given both the rationale and the concepts ( $\\S 2.2$ ); and (3) Problem Generation Model Training: Concep-tionale-problem triples are used to fine-tune an LLM as a problem generation model, followed by a rejection sampling fine-tuning stage for further", + "bbox": [ + 112, + 696, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "self-improvement, ensuring the quality of generated problems (§2.3).", + "bbox": [ + 507, + 370, + 884, + 401 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Concept Extraction", + "text_level": 1, + "bbox": [ + 507, + 416, + 709, + 432 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We collect a large set of seed prompts from the AoPS1 platform which contains challenging Olympiad-level math problems. Problems overlapping with common test benchmarks are filtered out to prevent data leakage. For each seed prompt, domain-specific concepts are extracted using a large language model. The model is provided with a query instruction (see Appendix A for details) that directs it to identify the salient concepts associated with the problem. While tailored to mathematics in our study, these concepts serve as a concise abstraction of the underlying reasoning in problem design and can be readily adapted to other domains by replacing the seed prompts with those relevant to the target domain.", + "bbox": [ + 505, + 439, + 884, + 680 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Rationale Generation", + "text_level": 1, + "bbox": [ + 507, + 695, + 724, + 709 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For effective problem generation, the underlying rationale should (i) be naturally derived from a set of foundational concepts and (ii) increase the likelihood of producing a high-quality problem. To capture this idea, let us denote the set of extracted $K$ concepts by", + "bbox": [ + 507, + 717, + 882, + 813 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} = \\left\\{c _ {1}, c _ {2}, \\dots , c _ {K} \\right\\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 829, + 778, + 847 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We introduce a latent variable $z$ that represents the rationale connecting these concepts to the original", + "bbox": [ + 507, + 862, + 880, + 894 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "4. For concept extraction, we construct a dataset consisting of 6,365 seed prompts. We use Llama-3.1-70B-Instruct to extract the relevant concepts, with the number of concepts per problem set to $k = 5$ . 
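Desiderata (i) and (ii) admit a compact formalization. In our notation (the paper may balance the two terms differently), the rationale sought for a problem $x$ with concept set $\mathbf{c}$ is

```latex
z^{*} \;=\; \arg\max_{z}\; \Big[\, \log p(z \mid \mathbf{c}) \;+\; \log p(x \mid z, \mathbf{c}) \,\Big]
```

where the first term keeps the rationale naturally derivable from the concepts and the second makes it an effective "amplifier" for generating the problem, echoing the Figure 1 caption. For step (3) of the pipeline, a schematic of the rejection-sampling fine-tuning loop follows; every callable is a placeholder to be supplied by the caller, and the unanimous-acceptance rule is our assumption rather than a criterion stated by the paper.

```python
from typing import Callable, List, Tuple

Triple = Tuple[str, str, str]  # (concepts, rationale, problem), all as text

def rejection_sampling_finetune(
    generate: Callable[[str], Tuple[str, str]],  # concepts -> (rationale, problem)
    judges: List[Callable[[Triple], bool]],      # evaluator LLMs as yes/no predicates
    finetune: Callable[[List[Triple]], None],    # one SFT pass over accepted triples
    concept_sets: List[str],
    rounds: int = 3,  # three evaluation rounds, per the experimental setup
) -> None:
    for _ in range(rounds):
        accepted: List[Triple] = []
        for concepts in concept_sets:
            rationale, problem = generate(concepts)
            # Keep a sample only if every evaluator judges it high quality.
            if all(judge((concepts, rationale, problem)) for judge in judges):
                accepted.append((concepts, rationale, problem))
        finetune(accepted)  # paper uses lr 2e-5 and batch size 64, as in MLE training
```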
In the rationale generation phase, we leverage Llama-3.1-70B-Instruct (Dubey et al., 2024), Qwen2.5-72B-Instruct (Yang et al., 2024a), and Qwen2.5-32B-Instruct to diversify the", + "bbox": [ + 507, + 533, + 885, + 840 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://huggingface.co/AI-MO", + "bbox": [ + 532, + 845, + 752, + 858 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "3https://huggingface.co/datasets/nvidia/", + "bbox": [ + 510, + 859, + 833, + 870 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "OpenMathInstruct-2", + "bbox": [ + 510, + 871, + 652, + 882 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "4We generate significantly fewer problems in the long-CoT setting than in the shot-CoT setting, as solution generation in the long-CoT setting is considerably more costly.", + "bbox": [ + 509, + 883, + 882, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 492, + 942, + 504, + 954 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/338ef14e4b9eb6cdebfad225469eb938ca3ac55f0808df1f37145c040aaddf4c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Models | Base | GSM8K | MATH-500 | AIME2024
short-CoT
Qwen2.5-Math-1.5B-Instruct | - | 84.8 | 75.8 | 10.0
OpenMathInstruct-1.5B | Qwen2.5-Math-1.5B | 84.9 | 63.8 | 13.3
NuminaMath-1.5B | Qwen2.5-Math-1.5B | 85.1 | 69.0 | 16.7
Evol-Instruct-1.5B | Qwen2.5-Math-1.5B | 84.6 | 67.4 | 10.0
KPDDS-1.5B | Qwen2.5-Math-1.5B | 83.4 | 64.4 | 6.7
PROMPTCoT-Qwen-1.5B | Qwen2.5-Math-1.5B | 87.1 | 78.4 | 26.7
long-CoT
DeepSeek-R1-Distill-Qwen-1.5B† | - | 85.1 | 80.2 | 23.3
PROMPTCoT-DS-1.5B | DeepSeek-R1-Distill-Qwen-1.5B | 86.1 | 85.0 | 43.3
", + "bbox": [ + 194, + 80, + 801, + 227 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e99b3fe444fea355d9cb66a6818b9858a6c452f9fc6cbff28431efba50460774.jpg", + "table_caption": [ + "Table 2: Evaluation results across three mathematical reasoning benchmarks for models with 1.5B parameters. Bold numbers indicate the highest performance in each respective setting. $\\dagger$ indicates results reproduced using our prompt." + ], + "table_footnote": [], + "table_body": "
Models | Base | GSM8K | MATH-500 | AIME2024
short-CoT
Qwen2.5-Math-7B-Instruct | - | 95.2 | 83.6 | 13.3
OpenMathInstruct-7B | Qwen2.5-Math-7B | 92.0 | 79.6 | 10.0
NuminaMath-7B | Qwen2.5-Math-7B | 92.9 | 81.8 | 20.0
Evol-Instruct-7B | Qwen2.5-Math-7B | 88.5 | 77.4 | 16.7
KPDDS-7B | Qwen2.5-Math-7B | 89.9 | 76.0 | 10.0
PROMPTCoT-Qwen-7B | Qwen2.5-Math-7B | 93.3 | 84.0 | 26.7
long-CoT
DeepSeek-R1-Distill-Qwen-7B† | - | 91.7 | 91.6 | 43.3
PROMPTCoT-DS-7B | DeepSeek-R1-Distill-Qwen-7B | 92.6 | 93.0 | 60.0
", + "bbox": [ + 194, + 292, + 801, + 444 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "dataset, resulting in a total of 19,095 conceptualization triple. We initialize the problem generation model with Llama-3.1-8B. During the MLE training stage, we employ a learning rate of $2 \\times 10^{-5}$ and a batch size of 64. In the rejection sampling phase, we perform three rounds of evaluation, using Llama-3.1-70B-Instruct and Qwen2.5-72B-Instruct as evaluators. For the rejection sampling process, we continue to use the same learning rate of $2 \\times 10^{-5}$ and batch size of 64 as in the MLE training stage. All experiments are conducted on 8×A100 80GB machines.", + "bbox": [ + 110, + 521, + 489, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 Main Results", + "text_level": 1, + "bbox": [ + 112, + 734, + 265, + 749 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The results of our experiments, presented in Tables 2 and 3, reveal the following key insights: (1) Our method achieves state-of-the-art performance across multiple benchmarks, outperforming the baselines on both short-CoT and long-CoT settings. This highlights the efficacy of our rationale-driven approach in generating high-quality problems; (2) As the problem difficulty increases from GSM8K to MATH-500 and AIME2024, our method's advantage becomes more pronounced. This is par", + "bbox": [ + 110, + 760, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ticularly evident on AIME2024, which demands more advanced reasoning and problem construction. Our approach excels at generating Olympiad-level problems, capturing the more complex reasoning needed for such tasks; and (3) Our method further enhances the performance of long-CoT models like DeepSeek-R1-Distill-Qwen. The generation of Olympiad-level problems, with their higher complexity, taps into the full potential of long-CoT reasoning, enabling deeper and more effective reasoning processes.", + "bbox": [ + 507, + 521, + 884, + 699 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/963e780b7d08fe11d9a9a51df38095c1464a44a013f03aa9c6d0a0a1d492440b.jpg", + "table_caption": [ + "Table 3: Evaluation results across three mathematical reasoning benchmarks for models with 7B parameters. Bold numbers indicate the highest performance in each respective setting. $\\dagger$ indicates results reproduced using our prompt." + ], + "table_footnote": [], + "table_body": "
Models | GSM8K | MATH-500 | AIME2024
PROMPTCoT (full) | 87.1 | 78.4 | 26.7
- rationale | 82.3 | 67.0 | 10.0
- optimal | 86.3 | 72.6 | 16.7
- rejection sampling | 85.9 | 75.2 | 20.0
", + "bbox": [ + 512, + 711, + 878, + 793 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4: Ablation study results for 1.5B parameter models. Bold numbers indicate the highest performance.", + "bbox": [ + 507, + 802, + 882, + 831 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Discussions", + "text_level": 1, + "bbox": [ + 507, + 862, + 643, + 877 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In addition to the extensive evaluation across multiple benchmarks, we seek to further understand the", + "bbox": [ + 507, + 889, + 882, + 921 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 492, + 942, + 505, + 954 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "underlying mechanisms of PROMPTCoT. Specifically, we explore the following research questions: (1) RQ1: How do the different components of PROMPTCoT contribute to its performance? (2) RQ2: How does the difficulty of the problems generated by PROMPTCoT compare to those from typical published works and open-source datasets? (3) RQ3: How does PROMPTCoT compare to state-of-the-art models with larger parameter sizes? (4) RQ4: What are the scaling properties of the problems generated by PROMPTCoT?", + "bbox": [ + 112, + 84, + 490, + 261 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 Ablation Study for RQ1", + "text_level": 1, + "bbox": [ + 112, + 273, + 347, + 288 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We perform an ablation study using 1.5B parameter models in the short-CoT setting and evaluate three variants of PROMPTCoT: exclusion of the rationale, denoted as “- rationale”; exclusion of the optimality condition for the rationale, referred to as “- optimal”; and exclusion of the rejection sampling process, represented as “- rejection sampling”.", + "bbox": [ + 112, + 294, + 490, + 420 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The results in Table 4 show that the full version of PROMPTCoT consistently outperforms all variants, emphasizing the importance of each component. Excluding the rationale leads to the largest performance drop, indicating the crucial role of the rationale in guiding problem generation. The \"optimal\" variant also experiences a performance decrease, albeit smaller, highlighting the significance of the rationale's optimal construction. The exclusion of rejection sampling has the least impact, suggesting that while it helps align the problem generation model with predefined quality criteria, the difficulty of problems plays a more important role in reasoning tasks.", + "bbox": [ + 115, + 423, + 489, + 648 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Analysis of Problem Difficulty for RQ2", + "text_level": 1, + "bbox": [ + 112, + 659, + 465, + 674 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "While Table 1 in Section 1 has presented analysis on problem difficulty, we provide more details here. We assess the performance of Qwen2.5-Math-Instruct when tested on problem sets produced by different problem generation methods, including PROMPTCOT and other baselines. Specifically, we calculate the accuracy of Qwen2.5-Math-Instruct on these problem sets to compare their difficulty. We use the accuracy on AIME2024 as a reference. 
For open-source datasets, including NuminaMath and OpenMathInstruct, we use the answers provided in the datasets as the ground truth to calculate",
Models | MATH-500 | AIME2024
QwQ-32B | 90.6 | 50.0
DeepSeek-R1-Distill-Qwen-32B† | 90.4 | 63.3
S1-32B | 93.0 | 56.7
PROMPTCoT-DS-7B | 93.0 | 60.0
", + "bbox": [ + 512, + 431, + 878, + 511 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5: Performance comparison with state-of-the-art models having 32B parameters on MATH-500 and AIME2024. $\\dagger$ indicates results reproduced using our prompt.", + "bbox": [ + 507, + 520, + 882, + 579 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Performance Comparison for RQ3", + "text_level": 1, + "bbox": [ + 507, + 609, + 826, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further compare PROMPTCoT-DS-7B with state-of-the-art reasoning models that have significantly larger parameter sizes to evaluate whether our model can match the reasoning capabilities typically associated with larger models. Specifically, we compare PROMPTCoT-DS-7B with the models QwQ (Team, 2024), DeepSeek-R1-Distill-Qwen32B (Guo et al., 2025), and S1-32B (Muennighoff et al., 2025), all of which leverage long-CoT to solve problems. The results presented in Table 5 show that PROMPTCoT-DS-7B achieves performance comparable to the state-of-the-art 32B parameter models. This suggests that our approach, through synthesizing Olympiad-level problems, enables smaller models to perform at a level previously associated with much larger models, highlighting the effectiveness of our method in enhancing reasoning capabilities without requiring an in", + "bbox": [ + 507, + 631, + 885, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "Upon implementation, we removed the two conditions marked as “(IMPORTANT)” in the instruction for rationale generation (Appendix B)", + "bbox": [ + 112, + 883, + 487, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 492, + 942, + 504, + 953 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/35023d8dfedcf299e1028d5e780fb8bae58986c0f2ea35721b5e181ec9f76869.jpg", + "image_caption": [ + "Figure 2: Performance comparison of models with varying capabilities, using Qwen2.5-Math series models, across problems from diverse sources." + ], + "image_footnote": [], + "bbox": [ + 191, + 80, + 394, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0aa136e57370be5426cb7619a626c6bb04614abaa879277083cd639e4df7ff4f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 398, + 80, + 600, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/13fe98cc1817e5239b842fc9c3f92047938b99157cc72ec14844cb086e5bb1b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 603, + 80, + 803, + 217 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "crease in parameter size.", + "bbox": [ + 112, + 284, + 299, + 299 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a5a8f6f87278b4ebfc4e37e7c0559b80ba985961d3761dbce132eacc940bd80c.jpg", + "image_caption": [ + "Figure 3: Comparison of Scaling Performance between PROMPTCOT and OpenMathInstruct across Varying Problem Set Sizes" + ], + "image_footnote": [], + "bbox": [ + 152, + 310, + 450, + 447 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 Scaling Properties for RQ4", + "text_level": 1, + "bbox": [ + 112, + 526, + 374, + 542 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We evaluate the scaling property of PROMPTCOt by comparing it with OpenMathInstruct across varying problem sizes, ranging from 200K to 800K problems. 
To conduct the evaluation, we use Qwen2.5-Math-1.5B as the base model and Qwen2.5-Math-Instruct-72B to generate solutions for the problems. We select MATH-500 as the evaluation dataset due to its balanced difficulty and appropriate scale. The results shown in Figure 3 demonstrate that PROMPTCoT consistently outperforms OpenMathInstruct across all problem sizes. PROMPTCoT exhibits significant scalability, maintaining superior performance as the dataset size increases, while OpenMathInstruct's performance gradually plateaus with larger problem sets.",
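To make the protocol concrete, the sketch below shows one way to draw the training subsets for such a scaling study from a JSONL dump of the full problem set. The file path is an assumption, and drawing nested subsets (each smaller set contained in the larger ones) is our design choice to keep comparisons across sizes stable; the paper does not specify its sampling procedure:

```python
import json
import random

FULL_SET = "promptcot_problems.jsonl"  # hypothetical path to the full problem set
SIZES = [200_000, 400_000, 600_000, 800_000]  # subset sizes used in Figure 3

with open(FULL_SET) as f:
    problems = [json.loads(line) for line in f]

# Shuffle once with a fixed seed, then take prefixes, so each smaller
# subset is contained in every larger one (nested subsets).
random.Random(0).shuffle(problems)
for n in SIZES:
    with open(f"subset_{n}.jsonl", "w") as out:
        for ex in problems[:n]:
            out.write(json.dumps(ex, ensure_ascii=False) + "\n")
```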
Our approach addresses this by explicitly incorporating rationales during synthesis.", + "bbox": [ + 112, + 84, + 489, + 147 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 112, + 161, + 247, + 175 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we introduce PROMPTCoT, a novel approach for synthesizing Olympiad-level math problems. Our method integrates rationale generation to guide problem synthesis, inspired by the success of LLMs in reasoning tasks such as chain-of-thought. By leveraging this rationale and underlying mathematical concepts, we generate complex, high-quality problems that are well-suited to improve the reasoning capabilities of LLMs. Through extensive experimentation across multiple mathematical reasoning benchmarks, including GSM8K, MATH-500, and AIME2024, we show that PROMPTCoT outperforms existing problem generation methods.", + "bbox": [ + 112, + 186, + 489, + 411 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ethical Considerations", + "text_level": 1, + "bbox": [ + 112, + 423, + 315, + 437 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In accordance with the established Code of Ethics, this study utilizes only publicly available data and information, ensuring that no private or confidential resources are involved.", + "bbox": [ + 112, + 449, + 487, + 512 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 112, + 525, + 218, + 539 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "While PROMPTCoT makes significant strides in the generation of challenging mathematical problems, there are several limitations that should be addressed in future work to fully unlock its potential:", + "bbox": [ + 112, + 551, + 489, + 629 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) The problem generation model used in PROMPTCoT is based on Llama-3.1-8B, which, due to its relatively smaller scale, may still struggle to generate exceptionally challenging problems, such as those seen in the International Mathematical Olympiad (IMO). Although it performs well for tasks within the scope of current benchmarks, its capacity limits the generation of problems requiring more advanced reasoning and complexity. In future work, we plan to explore the use of larger pre-trained models to improve the quality and difficulty of the generated problems, enabling the synthesis of more sophisticated problem sets.", + "(2) While PROMPTCoT has successfully scaled problem generation to 905K problems, this scale remains modest compared to the vast amounts of data used for LLM pretraining. Expanding the problem generation capacity to larger scales is crucial for" + ], + "bbox": [ + 112, + 631, + 489, + 921 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "generating more diverse and challenging problem sets. Future research should focus on extending the scalability of problem generation to better align with the scale of current LLM training data, contributing to the development of more robust and capable models.", + "bbox": [ + 507, + 84, + 884, + 181 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 510, + 208, + 608, + 223 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "AIME-2024. 
https://huggingface.co/datasets/ai-mo/aimo-validation-aime.", + "Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen McAleer, Albert Q. Jiang, Jia Deng, Stella Biderman, and Sean Welleck. 2024. Llemma: An open language model for mathematics. Preprint, arXiv:2310.10631.", + "Chuanqi Cheng, Jian Guan, Wei Wu, and Rui Yan. 2024. From the least to the most: Building a plug-and-play visual reasoner via data synthesis. Preprint, arXiv:2406.19934.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.", + "Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023. Pal: Program-aided language models. In International Conference on Machine Learning, pages 10764–10799. PMLR.", + "Tao Ge, Xin Chan, Xiaoyang Wang, Dian Yu, Haitao Mi, and Dong Yu. 2024. Scaling synthetic data creation with 1,000,000,000 personas. Preprint, arXiv:2406.20094.", + "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.", + "Yiming Huang, Xiao Liu, Yeyun Gong, Zhibin Gou, Yelong Shen, Nan Duan, and Weizhu Chen. 2024. Key-point-driven data synthesis with its enhancement on mathematical reasoning. arXiv preprint arXiv:2403.02333." + ], + "bbox": [ + 510, + 231, + 884, + 921 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 492, + 942, + 504, + 953 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shima Imani, Liang Du, and Harsh Shrivastava. 2023. Mathprompter: Mathematical reasoning using large language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 5: Industry Track), pages 37-42.", + "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720.", + "Subbarao Kambhampati. 2024. Can large language models reason and plan? Annals of the New York Academy of Sciences, 1534(1):15-18.", + "Chen Li, Weiqi Wang, Jingcheng Hu, Yixuan Wei, Nanning Zheng, Han Hu, Zheng Zhang, and Houwen Peng. 2024a. Common 7b language models already possess strong math capabilities. Preprint, arXiv:2403.04706.", + "Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. 2024b. Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions. Hugging Face repository, 13:9.", + "Qintong Li, Leyang Cui, Xueliang Zhao, Lingpeng Kong, and Wei Bi. 2024c. 
Gsm-plus: A comprehensive benchmark for evaluating the robustness of llms as mathematical problem solvers. arXiv preprint arXiv:2402.19255.", + "Xian Li, Ping Yu, Chunting Zhou, Timo Schick, Omer Levy, Luke Zettlemoyer, Jason Weston, and Mike Lewis. 2024d. Self-alignment with instruction back-translation. Preprint, arXiv:2308.06259.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. arXiv preprint arXiv:2305.20050.", + "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. 2023. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583.", + "Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. 2024. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. Preprint, arXiv:2410.05229.", + "Arindam Mitra, Hamed Khanpour, Corby Rosset, and Ahmed Awadallah. 2024. Orca-math: Unlocking the potential of slms in grade school math. arXiv preprint arXiv:2402.14830." + ], + "bbox": [ + 115, + 85, + 485, + 920 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. Preprint, arXiv:2501.19393.", + "OpenAI. 2024a. Introducing openai o1-preview. https://openai.com/index/introducing-openai-o1-preview/.", + "OpenAI. 2024b. Learning to reason with llms, september 2024.", + "Keiran Paster, Marco Dos Santos, Zhangir Azerbayev, and Jimmy Ba. 2023. Openwebmath: An open dataset of high-quality mathematical web text. Preprint, arXiv:2310.06786.", + "Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2023. Measuring and narrowing the compositionality gap in language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 5687-5711.", + "Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettle-moyer, Nicola Cancedda, and Thomas Scialom. 2024. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314.", + "Zhengyang Tang, Xingxing Zhang, Benyou Wang, and Furu Wei. 2024. Mathscale: Scaling instruction tuning for mathematical reasoning. arXiv preprint arXiv:2403.02884.", + "Qwen Team. 2024. Qwq: Reflect deeply on the boundaries of the unknown.", + "Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 2024. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2022. 
Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171.", + "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. 2023. Self-instruct: Aligning language" + ], + "bbox": [ + 510, + 85, + 880, + 920 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 489, + 942, + 509, + 954 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "models with self-generated instructions. Preprint, arXiv:2212.10560.", + "Zengzhi Wang, Xuefeng Li, Rui Xia, and Pengfei Liu. 2024. Mathpile: A billion-token-scale pretraining corpus for math. Preprint, arXiv:2312.17120.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022a. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022b. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.", + "Jiaxin Wen, Jian Guan, Hongning Wang, Wei Wu, and Minlie Huang. 2025. Unlocking reasoning potential in large language models by scaling code-form planning. In The Thirteenth International Conference on Learning Representations.", + "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. 2024. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724.", + "Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. 2023. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244.", + "Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. 2024. Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. Preprint, arXiv:2406.08464.", + "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024a. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.", + "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. 2024b. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122.", + "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. Limo: Less is more for reasoning. Preprint, arXiv:2502.03387.", + "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. 2023. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284." + ], + "bbox": [ + 115, + 85, + 487, + 919 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T. Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. 2024. Metamath: Bootstrap your own mathematical questions for large language models. Preprint, arXiv:2309.12284.", + "Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. 2023. 
Mammoth: Building math generalist models through hybrid instruction tuning. Preprint, arXiv:2309.05653.", + "Xiang Yue, Tuney Zheng, Ge Zhang, and Wenhu Chen. 2024. Mammoth2: Scaling instructions from the web. Preprint, arXiv:2405.03548.", + "Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Charlotte Zhuang, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. 2024. A careful examination of large language model performance on grade school arithmetic. Preprint, arXiv:2405.00332.", + "Xueliang Zhao, Xinting Huang, Wei Bi, and Lingpeng Kong. 2024a. Sego: Sequential subgoal optimization for mathematical problem-solving. In The 62nd Annual Meeting of the Association for Computational Linguistics (11/08/2024-16/08/2024, Bangkok, Thailand).", + "Xueliang Zhao, Xinting Huang, Tingchen Fu, Qintong Li, Shansan Gong, Lemao Liu, Wei Bi, and Lingpeng Kong. 2024b. Bba: Bi-modal behavioral alignment for reasoning with large vision-language models. In ACL (Findings).", + "Xueliang Zhao, Wenda Li, and Lingpeng Kong. 2024c. Subgoal-based demonstration learning for formal theorem proving. In Forty-first International Conference on Machine Learning.", + "Xueliang Zhao, Lin Zheng, Haige Bo, Changran Hu, Urmish Thakker, and Lingpeng Kong. 2024d. Subgoalxl: Subgoal-based expert learning for theorem proving. arXiv preprint arXiv:2408.11172." + ], + "bbox": [ + 510, + 85, + 880, + 682 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 489, + 942, + 507, + 954 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A Instruction for Concept Extraction", + "text_level": 1, + "bbox": [ + 114, + 84, + 453, + 99 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The following prompt extracts domain-specific concepts from each seed prompt. This instruction directs the large language model to identify the salient concepts underlying the given mathematical problem.", + "bbox": [ + 112, + 121, + 882, + 154 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Concept Extraction Prompt", + "text_level": 1, + "bbox": [ + 141, + 174, + 346, + 189 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As an expert in educational assessment, analyze this problem: {problem}", + "bbox": [ + 139, + 200, + 596, + 231 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Break down and identify {num_concepts} foundational concepts being tested. List these knowledge points that:", + "bbox": [ + 139, + 233, + 855, + 265 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Are core curriculum concepts typically taught in standard courses,", + "- Are precise and measurable (not vague like \"understanding math\"),", + "- Are essential building blocks needed to solve this problem,", + "- Represent fundamental principles rather than problem-specific techniques." 
+ ], + "bbox": [ + 161, + 275, + 726, + 370 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Think through your analysis step by step, then format your response as a Python code snippet containing a list of {num_concepts} strings, where each string clearly describes one fundamental knowledge point.", + "bbox": [ + 137, + 382, + 857, + 431 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B Instruction for Rationale Generation", + "text_level": 1, + "bbox": [ + 114, + 468, + 468, + 483 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The following prompt is used to guide the large language model in generating a detailed thought process that serves as the rationale for prompt generation. Note that we emphasize two requirements with \"IMPORTANT\" markers in the prompt, aiming to enhance $p(x|z,\\mathbf{c})$ and $p(z|\\mathbf{c})$ , respectively.", + "bbox": [ + 110, + 506, + 882, + 555 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Rationale Generation Instruction", + "text_level": 1, + "bbox": [ + 141, + 575, + 386, + 589 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Imagine you are an expert in educational problem design.", + "bbox": [ + 141, + 599, + 566, + 614 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "You will be shown these components:", + "bbox": [ + 141, + 615, + 420, + 630 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Problem: {problem}", + "bbox": [ + 142, + 631, + 300, + 646 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Fundamental Concepts: {list of concepts}", + "bbox": [ + 142, + 648, + 465, + 663 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Difficulty Level: {difficulty_level}", + "bbox": [ + 142, + 664, + 403, + 678 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Your task is to reverse-engineer a clear thinking process that shows how a teacher might design this problem. This thinking process should:", + "bbox": [ + 141, + 680, + 853, + 711 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Show how combining the given foundational concepts naturally leads to a problem at the specified difficulty level.", + "- Include all key decisions and reasoning that shaped the problem design.", + "- (IMPORTANT) Be so precise and detailed that another teacher following these exact steps would recreate the identical problem.", + "- (IMPORTANT) Be so natural and logical that another teacher could derive the same thinking process using only the foundational concepts and difficulty level." + ], + "bbox": [ + 161, + 721, + 853, + 866 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Present your answer after \"Thinking Process:\" with the complete step-by-step thinking process described above.", + "bbox": [ + 139, + 877, + 855, + 908 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 489, + 941, + 509, + 954 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Evaluation Prompt", + "text_level": 1, + "bbox": [ + 141, + 186, + 284, + 200 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As a critical expert in educational problem design, evaluate the following problem components:", + "bbox": [ + 139, + 211, + 719, + 225 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "$= = =$ GIVEN MATERIALS $= = =$", + "bbox": [ + 139, + 225, + 347, + 235 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 1. 
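The remainder of this instruction asks the model to format its response as a Python code snippet containing a list of {num_concepts} strings, so the caller needs a small parser to recover the concepts. Below is a minimal sketch of such a parser; the regex heuristic and empty-list fallback are our assumptions, as the paper does not describe its parsing code:

```python
import ast
import re

def parse_concepts(response: str) -> list[str]:
    """Recover the list of concept strings from an LLM response.

    The prompt requests a Python code snippet containing a list of
    strings, so we locate the outermost [...] literal and parse it
    safely with ast.literal_eval (never eval on model output).
    """
    match = re.search(r"\[.*\]", response, re.DOTALL)
    if match is None:
        return []
    try:
        parsed = ast.literal_eval(match.group(0))
    except (ValueError, SyntaxError):
        return []
    if not isinstance(parsed, list):
        return []
    return [c.strip() for c in parsed if isinstance(c, str)]

# Example:
# parse_concepts('["modular arithmetic", "the pigeonhole principle"]')
# -> ['modular arithmetic', 'the pigeonhole principle']
```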
Problem & Design Rationale: {rationale_and_problem} (The rationale describes the author's thinking process and justification in designing this problem)",