Chelsea707 commited on
Commit
05e8763
·
verified ·
1 Parent(s): 75498c4

Add MinerU batch 49144e31-e87c-4d49-9d52-1178c26b083f

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +8 -0
  2. data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_content_list.json +0 -0
  3. data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_model.json +0 -0
  4. data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf +3 -0
  5. data/2025/2503_02xxx/2503.02318/full.md +720 -0
  6. data/2025/2503_02xxx/2503.02318/images.zip +3 -0
  7. data/2025/2503_02xxx/2503.02318/layout.json +0 -0
  8. data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_content_list.json +0 -0
  9. data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_model.json +0 -0
  10. data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_origin.pdf +3 -0
  11. data/2025/2503_02xxx/2503.02324/full.md +892 -0
  12. data/2025/2503_02xxx/2503.02324/images.zip +3 -0
  13. data/2025/2503_02xxx/2503.02324/layout.json +0 -0
  14. data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_content_list.json +0 -0
  15. data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_model.json +0 -0
  16. data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_origin.pdf +3 -0
  17. data/2025/2503_02xxx/2503.02357/full.md +453 -0
  18. data/2025/2503_02xxx/2503.02357/images.zip +3 -0
  19. data/2025/2503_02xxx/2503.02357/layout.json +0 -0
  20. data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_content_list.json +0 -0
  21. data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_model.json +0 -0
  22. data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_origin.pdf +3 -0
  23. data/2025/2503_02xxx/2503.02358/full.md +0 -0
  24. data/2025/2503_02xxx/2503.02358/images.zip +3 -0
  25. data/2025/2503_02xxx/2503.02358/layout.json +0 -0
  26. data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_content_list.json +0 -0
  27. data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_model.json +0 -0
  28. data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_origin.pdf +3 -0
  29. data/2025/2503_02xxx/2503.02390/full.md +576 -0
  30. data/2025/2503_02xxx/2503.02390/images.zip +3 -0
  31. data/2025/2503_02xxx/2503.02390/layout.json +0 -0
  32. data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_content_list.json +0 -0
  33. data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_model.json +0 -0
  34. data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_origin.pdf +3 -0
  35. data/2025/2503_02xxx/2503.02407/full.md +0 -0
  36. data/2025/2503_02xxx/2503.02407/images.zip +3 -0
  37. data/2025/2503_02xxx/2503.02407/layout.json +0 -0
  38. data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_content_list.json +0 -0
  39. data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_model.json +0 -0
  40. data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_origin.pdf +3 -0
  41. data/2025/2503_02xxx/2503.02424/full.md +0 -0
  42. data/2025/2503_02xxx/2503.02424/images.zip +3 -0
  43. data/2025/2503_02xxx/2503.02424/layout.json +0 -0
  44. data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_content_list.json +0 -0
  45. data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_model.json +0 -0
  46. data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_origin.pdf +3 -0
  47. data/2025/2503_02xxx/2503.02445/full.md +0 -0
  48. data/2025/2503_02xxx/2503.02445/images.zip +3 -0
  49. data/2025/2503_02xxx/2503.02445/layout.json +0 -0
  50. data/2025/2503_02xxx/2503.02450/images.zip +3 -0
.gitattributes CHANGED
@@ -1979,3 +1979,11 @@ data/2025/2503_13xxx/2503.13646/ec61822f-80f5-4b4c-80e5-df7fb23ac8f4_origin.pdf
1979
  data/2025/2503_13xxx/2503.13657/f9f0f12f-c46c-4f07-9710-e29462265246_origin.pdf filter=lfs diff=lfs merge=lfs -text
1980
  data/2025/2503_13xxx/2503.13721/a97f1dd6-65e6-4086-81b6-b457f406ad62_origin.pdf filter=lfs diff=lfs merge=lfs -text
1981
  data/2025/2503_16xxx/2503.16527/a8560f1b-d038-456e-8a05-1cf3f71e9afc_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
1979
  data/2025/2503_13xxx/2503.13657/f9f0f12f-c46c-4f07-9710-e29462265246_origin.pdf filter=lfs diff=lfs merge=lfs -text
1980
  data/2025/2503_13xxx/2503.13721/a97f1dd6-65e6-4086-81b6-b457f406ad62_origin.pdf filter=lfs diff=lfs merge=lfs -text
1981
  data/2025/2503_16xxx/2503.16527/a8560f1b-d038-456e-8a05-1cf3f71e9afc_origin.pdf filter=lfs diff=lfs merge=lfs -text
1982
+ data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf filter=lfs diff=lfs merge=lfs -text
1983
+ data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_origin.pdf filter=lfs diff=lfs merge=lfs -text
1984
+ data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
1985
+ data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_origin.pdf filter=lfs diff=lfs merge=lfs -text
1986
+ data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_origin.pdf filter=lfs diff=lfs merge=lfs -text
1987
+ data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_origin.pdf filter=lfs diff=lfs merge=lfs -text
1988
+ data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_origin.pdf filter=lfs diff=lfs merge=lfs -text
1989
+ data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02318/185a8d9e-02ca-40b7-9498-1733bd88ebda_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7080d659f390283f0293a487f17dc6aaf0613e1f6e888d3f9906febfd7a48754
3
+ size 2117618
data/2025/2503_02xxx/2503.02318/full.md ADDED
@@ -0,0 +1,720 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Audio-Reasoner: Improving Reasoning Capability in Large Audio Language Models
2
+
3
+ Zhifei Xie $^{1\dagger}$ Mingbao Lin $^{3\dagger}$ Zihang Liu $^{2\dagger}$
4
+
5
+ Pengcheng Wu<sup>1</sup> Shuicheng Yan<sup>2†</sup> Chunyan Miao<sup>1‡</sup>
6
+
7
+ $^{1}$ Nanyang Technological University $^{2}$ National University of Singapore $^{3}$ Rakuten
8
+
9
+ †Equal Contributions ‡Corresponding Authors
10
+
11
+ zhifei001@e.ntu.edu.sg linmb001@outlook.com liuzihang99@gmail.com
12
+
13
+ pengchengwu@ntu.edu.sg yansc@nus.edu.sg ascymiao@ntu.edu.sg
14
+
15
+ Project: https://github.com/xzf-thu/Audio-Reasoner
16
+
17
+ # Abstract
18
+
19
+ Recent advancements in multimodal reasoning have largely overlooked the audio modality. We introduce Audio-Reasoner, a large-scale audio language model for deep reasoning in audio tasks. We curate a diverse collection of multi-task audio datasets with simple annotations, refining them through structured secondary labeling and complex question generation. Additionally, we utilize advanced closed-source models to generate structured reasoning chains, transforming raw annotations into a formatted inference process. Following inference scaling principles, we train Audio-Reasoner on CoTA, a high-quality reasoning dataset with 1.2 million reasoning-rich samples. Experiments show state-of-the-art performance across key benchmarks, including MMAU-mini $(+25.42\%)$ , AIR-Bench chat/foundation $(+14.57\% / + 10.13\%)$ , and MELD $(+8.01\%)$ . Our findings stress the central role of structured CoT training in advancing audio reasoning.
20
+
21
+ ![](images/6b8d76aec6a568542a810d60105df57d132003453be82af5476ad0dd56265004.jpg)
22
+ Figure 1: Benchmark performance of Audio-Reasoner on tasks of speech emotion recognition (SER), speech-to-text translation (S2TT), MMAU, and AIR-Bench chat. S2TT is measured using BLEU, while SER and MMAU are measured by accuracy (ACC) (left axis). AIR-Bench is scored by GPT (right axis).
23
+
24
+ # 1 Introduction
25
+
26
+ Recent advancements in large language models (LLMs) have significantly enhanced their reasoning capabilities, driven by innovations like chain-of-thought (CoT) and inference scaling. Models such as OpenAI's o1 [Jaech et al., 2024] and Deepseek-R1 [Guo et al., 2025] have demonstrated significant improvements, especially in tasks like mathematics and coding [Team et al., 2025, Zhao et al., 2024a, Muennighoff et al., 2025, Liu et al., 2024a, Zhang et al., 2024b, Deng et al., 2024, Hui et al., 2024, Yang et al., 2024a]. These models have set a precedent for achieving "deep thinking" by tackling complex, structured tasks. The CoT framework, in particular, has been successfully applied to multimodal models, improving their cognitive abilities in image and video reasoning. Models such as Visual-CoT [Shao et al., 2024a], LLaVA-Reasoner [Zhang et al., 2024a], and MAmmoTH-VL [Guo et al., 2024] have demonstrated that large-scale datasets and multidimensional reasoning can enhance model performance, even in tasks involving images and videos [Zou et al., 2023]. Other works, like Mulberry [Wen et al., 2019] and Image-of-Thought [Zhou et al., 2024], integrate reflective and image-editing tools to further refine multimodal reasoning, offering new pathways for understanding complex queries across multiple modalities.
27
+
28
+ However, the application of CoT in the audio modality has been largely underexplored. Although models like Audio Flamingo [Kong et al., 2024], SALMONN [Tang et al., 2023], and Qwen2-Audio [Chu et al., 2024] have pushed the boundaries of large audio language models (LALMs), these advancements have not yet incorporated CoT reasoning at scale. Recent research [Ma et al., 2025] suggests that existing CoT methods, such as zero-shot reasoning in audio tasks, fail to significantly improve performance on more complex queries. This limitation is largely attributed to the simplicity of existing audio datasets—such as AudioSet [Gemmeke et al., 2017], AudioCaps [Kim et al., 2019], and Clotho [Drossos et al., 2020]—which predominantly feature short, simple labels. These simplified datasets hinder the development of LALMs capable of more intricate reasoning. Without richer, more complex data, these models struggle with long-form reasoning, and the application of CoT often leads to severe hallucinations and degraded performance. Therefore, advancing CoT in LALMs necessitates overcoming these dataset limitations to allow for more effective, deep reasoning.
29
+
30
+ To address the challenges in audio-based reasoning, we propose a scalable and effective approach to generating high-quality pretraining data. Using state-of-the-art commercial models, we introduce CoTA, a large-scale dataset containing 1.2 million refined captions and question-answer (QA) pairs. CoTA spans multiple datasets and tasks, undergoing rigorous filtering to ensure diversity and quality. Building on CoTA, we develop Audio-Reasoner, a large audio language model designed for long-context reasoning. Audio-Reasoner is trained with a 4K token context window and generates structured CoT reasoning whose length can exceed 1K tokens in real-world tasks. The model is fine-tuned on CoTA using supervised fine-tuning, adhering to a structured reasoning framework, as illustrated in Figure 2: (1) Planning—Identifies key problem components from the user query and formulates a structured sequence of reasoning steps essential for deriving an answer. (2) Caption—Extracts and integrates relevant multimodal content from the input to enrich the reasoning process. (3) Reasoning—Executes a systematic, step-by-step reasoning procedure to ensure logical coherence and accuracy. (4) Summary—Synthesizes the reasoning process into a final response that is concise, contextually grounded, and precise.
31
+
32
+ Our experimental results, partially presented in Figure 1, demonstrate the effectiveness of Audio-Reasoner. More comprehensively, we evaluate the model across multiple benchmarks: MMAU-mini [Sakshi et al., 2024]: A dataset with 1,500 closed-choice questions testing reasoning across sound, speech, and music. AIR-Bench [Yang et al., 2024b]: Various types of audio signals including human speech, natural sounds, and music. CoVoST2(zh-en) [Wang et al., 2021]: Speech-to-text translation in Chinese and English. MELD [Poria et al., 2019]: Emotion classification. Compared to Qwen2-Audio-Instruct [Chu et al., 2024], Audio-Reasoner achieves: $+25.4\%$ improvement on MMAU-mini with reasoning subtask gains: $+44.4\%$ , $+26.1\%$ , and $+9.3\%$ ; $+14.6\%$ gains on AIR-Bench chat; $+30.6\%$ on CoVoST2 (zh/en subset, average BLEU score); $+8.01\%$ on MELD. These results validate the effectiveness of our approach in advancing long-context reasoning and inference scaling for audio models.
33
+
34
+ The major contributions we have made in this paper include:
35
+
36
+ - We propose Audio-Reasoner, designed for deep reasoning and inference scaling in the audio modality. Built upon Qwen2-Audio and fine-tuned with structured CoT training, Audio-Reasoner significantly improves long-context reasoning across diverse audio tasks.
37
+
38
+ - We develop CoTA, a large-scale dataset with 1.2 million high-quality captions and QA pairs, spanning multiple audio domains. The dataset enables structured reasoning and enhances audio-language pretraining.
39
+ - We introduce a scalable data generation pipeline leveraging advanced commercial models to produce complex reasoning-based QA pairs and structured CoT annotations, enriching model training.
40
+ - We achieve state-of-the-art performance, with $+25.4\%$ gains over Qwen2-Audio-Instruct on MMAU-mini, along with significant improvements in reasoning, translation, and emotion recognition tasks.
41
+
42
+ # 2 Related Work
43
+
44
+ Chain-of-Thought Reasoning. LLMs leverage in-context learning (ICL) to enhance their reasoning capabilities by processing prompts and context. This is further strengthened through CoT techniques. Various CoT methods have been explored, including Tree of Thoughts (TOT) [Yao et al., 2023], manual few-shot CoT [Wei et al., 2022], and automatically generated approaches [Jin et al., 2024, Zhang et al., 2022]. In addition, studies have delved into the necessity of CoT, its theoretical foundations, and its effectiveness across a wide range of tasks [Sprague et al., 2024, Stechly et al., 2024, Turpin et al., 2023]. The release of OpenAI's o1 model [Jaech et al., 2024] has sparked renewed interest in CoT research, significantly boosting the capabilities of LLMs, especially in multi-step reasoning tasks such as coding [Zhang et al., 2024b] and mathematics [Yang et al., 2024a], setting new performance benchmarks. CoT techniques have been integrated with other methods such as Monte Carlo Tree Search [Browne et al., 2012], reflection [Guo et al., 2025], and tool use [Qin et al., 2023], and are often trained alongside reinforcement learning approaches [Rafailov et al., 2023, Zeng et al., 2024, Engstrom et al., 2020, Shao et al., 2024b].
45
+
46
+ Multimodal Chain-of-Thought. CoT techniques have also been explored in the realm of multimodal large models. For example, Visual-COT [Shao et al., 2024a] incorporates object detection to aid in reasoning; LLaVA-Reasoner [Zhang et al., 2024a] uses closed-source models for CoT fine-tuning through recaptioning. LLaVA-CoT [Xu et al., 2024] and MAmmoTH-VL [Guo et al., 2024] scale datasets to improve model performance. Other models, such as Mulberry [Wen et al., 2019], explore the application of reflective thinking, and Image-of-Thought [Zhou et al., 2024] integrates image editing tools to enhance reasoning. Video-related studies [Wang et al., 2024, Han et al., 2024, Fei et al., 2024, Tang et al., 2024] have demonstrated the effectiveness of CoT in reasoning tasks within the video domain. However, the application of CoT in the audio domain is still in its infancy. The study Audio-COT [Ma et al., 2025] shows some improvement with zero-shot CoT in audio tasks, but it falls short on more complex problems. This paper aims to explore this gap further.
47
+
48
+ Large Audio Language Models. LALMs can be broadly categorized into two areas: audio understanding and real-time dialogue. Audio understanding models typically consist of a three-layer architecture—an encoder, connector, and an LLM—focusing on specific domains, as seen in models like Mu-LLaMA [Liu et al., 2024b], LTU [Gong et al., 2023b], EmoBox [Ma et al., 2024], and GAMA [Ghosh et al., 2024]. Other models, such as LTU-AS [Gong et al., 2023a], SALMONN [Tang et al., 2023] and Qwen2-Audio [Chu et al., 2024], employ unified architectures designed for multi-task training. Real-time conversation models, which focus on speech input and extend transformers to real-time speech synthesis, are also gaining popularity [Zhang et al., 2023b, Xie and Wu, 2024a,b, Fu et al., 2025, Defossez et al., 2024]. However, despite their focus on understanding and rapid response, current LALMs still lack significant exploration into reasoning tasks, such as COT. This paper addresses this gap by investigating the application of CoT in LALMs.
49
+
50
+ # 3 Audio-Reasoner
51
+
52
+ In this section, we present the training methodology for our Audio-Reasoner model, designed to enable LALMs with structured, step-by-step reasoning in multimodal tasks. Unlike standard models, Audio-Reasoner generates both a chain-of-thought (CoT) and final response, ensuring logical consistency and interpretability. We describe the training framework, which includes a structured CoT approach, a novel data generation pipeline, and the large-scale CoTA dataset. This dataset is carefully constructed through automated annotation, reasoning chain creation, and validation, setting the foundation for robust and explainable audio-based reasoning.
53
+
54
+ ![](images/43b5b9c5fc69a44f947aab314c7528399913cc4f40054888654c56601c09c66b.jpg)
55
+ Figure 2: Comparison between Audio-Reasoner and Qwen2-Audio-Instruct: While Qwen2-Audio-Instruct produces brief and error-prone reasoning, our Audio-Reasoner uses a structured reasoning framework with distinct "thinking" and "response" phases, ensuring a more reliable and well-supported output through careful planning, information extraction, and step-by-step reasoning.
56
+
57
+ # 3.1 Model Training with Audio Reasoning
58
+
59
+ A standard large language model is trained to generate an output sequence $Y$ given an input sequence $X$ . The probability distribution of the model's output is formulated as:
60
+
61
+ $$
62
+ P (Y \mid X; \theta) = f _ {\theta} (X), \tag {1}
63
+ $$
64
+
65
+ where $f_{\theta}$ is a Transformer-based model parameterized by $\theta$ . The training objective follows a maximum likelihood estimation framework:
66
+
67
+ $$
68
+ \mathcal {L} (\theta) = - \sum_ {i = 1} ^ {N} \log P \left(Y _ {i} \mid X _ {i}; \theta\right). \tag {2}
69
+ $$
70
+
71
+ In our Audio-Reasoner, the input consists of an audio signal $A$ and a text-based query $Q$ , forming the multimodal input representation:
72
+
73
+ $$
74
+ X = (A, Q). \tag {3}
75
+ $$
76
+
77
+ Unlike conventional LLMs, where the output is a single response, we structure the model's output into two distinct components: the chain of thought reasoning $C$ , which captures the step-by-step logical process, and the final response $R$ , which provides the ultimate answer. The model thus learns to generate the concatenation of $C$ and $R$ , leading to the probability distribution:
78
+
79
+ $$
80
+ P (C, R \mid A, Q; \theta) = f _ {\theta} (A, Q). \tag {4}
81
+ $$
82
+
83
+ To ensure explicit learning of both reasoning and final response generation, we construct a dataset defined as:
84
+
85
+ $$
86
+ \mathcal {D} = \left\{\left(A _ {i}, Q _ {i}, C _ {i}, R _ {i}\right) \right\} _ {i = 1} ^ {N}, \tag {5}
87
+ $$
88
+
89
+ where each training sample consists of an input audio signal $A_{i}$ , its corresponding textual query $Q_{i}$ , the structured reasoning process $C_{i}$ , and the final answer $R_{i}$ . This dataset formulation reinforces the model's ability to perform in-context learning and deep reasoning, ensuring that generated responses are not only accurate but also logically structured.
90
+
91
+ The training objective maximizes the likelihood of both $C$ and $R$ , encouraging the model to first reason and then generate a response. The loss function is given by:
92
+
93
+ $$
94
+ \mathcal {L} (\theta) = - \sum_ {i = 1} ^ {N} \log P \left(C _ {i}, R _ {i} \mid A _ {i}, Q _ {i}; \theta\right). \tag {6}
95
+ $$
96
+
97
+ By optimizing this objective, Audio-Reasoner is trained to articulate a structured reasoning process before providing its final response. This approach enhances interpretability, reliability, and alignment with human reasoning.
98
+
99
+ At inference-time, our Audio-Reasoner follows a structured reasoning pipeline, as illustrated in Figure 2. The reasoning process consists of four sequential steps: (1) Planning $(P)$ : The model analyzes the query, identifies key problem components, and outlines the reasoning steps necessary to derive an answer. (2) Captioning $(C)$ : Relevant multimodal content is extracted from the input, such as speech transcription, acoustic event detection, or context information. (3) Reasoning $(R)$ : Based on the extracted content, the model performs structured, step-by-step reasoning. (4) Summary $(S)$ : The model synthesizes its reasoning process into a final, concise, and accurate response. This structured inference process can be formalized as follows:
100
+
101
+ $$
102
+ P \sim f _ {\theta} (A, Q), \tag {7}
103
+ $$
104
+
105
+ $$
106
+ C \sim f _ {\theta} (A, Q, P), \tag {8}
107
+ $$
108
+
109
+ $$
110
+ R \sim f _ {\theta} (A, Q, P, C), \tag {9}
111
+ $$
112
+
113
+ $$
114
+ S \sim f _ {\theta} (A, Q, P, C, R). \tag {10}
115
+ $$
116
+
117
+ Compared to the direct-response counterpart [Chu et al., 2024], this approach provides two key advantages: Improved Interpretability—By explicitly modeling each reasoning step, the process becomes more transparent, making it easier to analyze and diagnose errors. Reduced Hallucinations—The structured reasoning pipeline mitigates speculative or incorrect responses, ensuring that outputs remain logically grounded.
118
+
119
+ Figure 2 illustrates the structured CoT reasoning process, highlighting how each stage contributes to the final response. This approach draws inspiration from recent advancements in symbolic reasoning and CoT training [Cui et al., 2024], which emphasize that zero-shot reasoning without training is less effective. Moreover, previous studies have shown that models tuned on native CoT data significantly outperform those trained on generic labels, especially in multimodal reasoning tasks [Guo et al., 2024, Wen et al., 2019].
120
+
121
+ Table 1: Domains and tasks of our constructed CoTA dataset.
122
+
123
+ <table><tr><td>Domain</td><td>Task</td><td>Description</td></tr><tr><td>Sound</td><td>Sound QA</td><td>Sound question answering</td></tr><tr><td rowspan="3">Speech</td><td>Speech QA</td><td>Speech question answering</td></tr><tr><td>SER</td><td>Speaker emotion recognition</td></tr><tr><td>S2TT</td><td>Speech to text translation</td></tr><tr><td>Music</td><td>Music QA</td><td>Music question answering</td></tr></table>
124
+
125
+ # 3.2 Systematic Data Preparation for Audio Reasoning
126
+
127
+ Training the Audio-Reasoner model requires a high-quality, diverse, and multitask audio-based reasoning dataset. Our goal is to develop a scalable and effective data generation method that systematically transforms raw audio data and simple human-labeled annotations into structured reasoning tasks. The resulting CoTA dataset with 1.2 million samples, focusing on complex reasoning-based question-answering tasks, spans three domains—audio, speech, and music—as detailed in Table 1.
128
+
129
+ To achieve this, we design a structured data generation pipeline consisting of three key stages: (1) generating high-quality annotations and diverse questions, (2) constructing structured reasoning chains, and (3) performing comprehensive validation. The complete pipeline is illustrated in Figure 3. The following sections describe each stage in detail.
130
+
131
+ ![](images/cd2008c2e248f8239147184f97d37d4637b0f8e288fd39a3f6af8d50411851e3.jpg)
132
+ Figure 3: Multistage data generation pipeline.
133
+
134
+ # 3.2.1 Multistage Data Generation Pipeline
135
+
136
+ Stage 1: Automated Annotation and Question-Answer Synthesis. We begin by using advanced external closed-source models to improve the existing simple human annotations into high-quality and logically coherent training data. While large language models can produce hallucinations in free-form generation, they are highly effective at structured, evidence-based content creation. We leverage this strength by instructing the model to sequentially describe the audio elements, enhancing its understanding of sound sources and speech context. Based on these enriched descriptions, the model generates a diverse set of questions, ranging from straightforward factual queries to complex logical reasoning problems. This process ensures that the dataset captures a broad spectrum of reasoning tasks, as summarized in Table 1. In addition, Sec. A.1 provides the prompt used to instruct closed-source models to generate questions and corresponding answers.
137
+
138
+ Stage 2: Structured Reasoning Chain Construction. Next, we transform the generated question-answer pairs into structured reasoning chains. Given the limited development of CoT methodologies in the audio domain, we adopt a systematic approach to ensure inference stability. The model first plans and analyzes the questions, extracts key information from the captions, and formulates logical steps leading to the answer. To facilitate structured reasoning, we employ explicit step demarcations such as <THINK> and <REASONING>, allowing the model to autonomously construct multi-step inference pathways. Sec. A.2 describes the prompt used for the structured reasoning chain construction process.
139
+
140
+ Stage 3: Quality Assurance and Dataset Validation. Finally, we subject the generated data to a rigorous review process. Using the raw audio input, Stage 1 annotations, and Stage 2 reasoning chains, the model assesses whether the generated content is accurate, coherent, and suitable for inclusion in the final dataset. This step ensures the overall quality and reliability of the CoTA dataset. Sec. A.3 illustrates prompt used for filtering low-quality contents.
141
+
142
+ # 3.2.2 Task Taxonomy: Categories and Reasoning Methodologies
143
+
144
+ The CoTA dataset encompasses a range of reasoning-based tasks, each requiring a distinct reasoning path that the model should grasp. These include:
145
+
146
+ (1) Sound-Based Question Answering: The model identifies and analyzes sound characteristics, contextualizing them within the user's query to derive a reasoned response. (2) Speech-Based Question Answering: The model recognizes speaker timbres, transcribes speech content, and incrementally processes the question to determine the appropriate answer. (3) Speech Emotion Recognition (SER) and Speech-to-Text Translation (S2TT): These specialized tasks require the model to integrate speech recognition with emotion analysis and language translation, forming a structured reasoning process. (4) Music-Based Question Answering: As music is highly abstract, the
147
+
148
+ Table 2: Composition of our CoTA Dataset. We consider Google Gemini [Team et al., 2024] to build the reasoning ability in CoTA. Note that Multi-Speaker and Complex Audio datasets are manually synthesized, details of which can be referred to Sec. B.
149
+
150
+ <table><tr><td>Category</td><td>Dataset Source</td><td>Main Skills Learning</td><td>Model Used</td><td>Quantity</td><td>Percentage</td><td>Synthetic</td></tr><tr><td rowspan="4">Speech</td><td>Multi-Speaker</td><td>Multi-speaker Speech QA</td><td>gemini-2.0-flash</td><td>117.4k</td><td>12.09%</td><td>Yes</td></tr><tr><td>MELD [Poria et al., 2019]</td><td>Speech Emotion QA</td><td>gemini-2.0-pro-exp</td><td>29.2k</td><td>3.01%</td><td>No</td></tr><tr><td>CoVoST2 [Wang et al., 2021]</td><td>Speech-to-Text Translation</td><td>gemini-2.0-flash</td><td>224.6k</td><td>23.13%</td><td>No</td></tr><tr><td>Mustard [Castro et al., 2019]</td><td>Sarcasm Detection</td><td>gemini-2.0-pro-exp</td><td>1k</td><td>0.10%</td><td>No</td></tr><tr><td>Music</td><td>MusicBench [Melechovsky et al., 2024]</td><td>Music QA</td><td>gemini-2.0-flash</td><td>137.1k</td><td>14.12%</td><td>No</td></tr><tr><td rowspan="4">Sound</td><td>AudioSet [Gemmeke et al., 2017]</td><td>Sound QA</td><td>gemini-2.0-flash</td><td>315.2k</td><td>32.46%</td><td>No</td></tr><tr><td>Clotho [Drossos et al., 2020]</td><td>Sound QA</td><td>gemini-2.0-pro-exp</td><td>9.3k</td><td>0.93%</td><td>No</td></tr><tr><td>AudioCaps [Kim et al., 2019]</td><td>Sound QA</td><td>gemini-2.0-flash</td><td>117.5k</td><td>12.10%</td><td>No</td></tr><tr><td>Complex Audio</td><td>Complex Audio QA</td><td>gemini-2.0-flash</td><td>20k</td><td>2.06%</td><td>Yes</td></tr></table>
151
+
152
+ model first analyzes fundamental attributes such as tonality, tempo, and emotion before progressing to genre classification and deeper inferential reasoning based on the user's query. A comprehensive breakdown of the dataset's task categories and reasoning pathways is provided in Table 1.
153
+
154
+ # 3.2.3 Conclusion and Next Steps
155
+
156
+ In summary, we have introduced a systematic data generation pipeline that ensures the creation of high-quality, structured reasoning data for the Audio-Reasoner model. Our approach involves enriching raw audio data with detailed annotations, generating diverse questions, constructing explicit reasoning chains, and implementing a comprehensive validation framework. The following section provides an in-depth analysis of the final CoTA dataset and its reasoning capabilities, with a detailed statistical overview presented in Table 2.
157
+
158
+ # 3.3 CoTA Dataset Analysis
159
+
160
+ To evaluate the quality and reasoning efficacy of the CoTA dataset, we analyze its design from two key perspectives: (1) comprehensive audio coverage, ensuring broad representation across real-world and synthetic scenarios, and (2) scalability of reasoning complexity, which aligns task difficulty with structured inference patterns. Together, these aspects address critical gaps in audio-language pretraining.
161
+
162
+ Comprehensive Audio Coverage. CoTA integrates three audio domains—speech (38.33%), music (14.12%), and environmental sounds (47.55%)—ensuring diverse and representative coverage of real-world auditory contexts. This multi-domain structure captures a wide spectrum of acoustic phenomena, ranging from conversational speech (e.g., speech-to-text translation tasks in CoVoST 2) to intricate musical structures (MusicBench) and fine-grained environmental sound analysis (e.g., AudioSet's rich descriptions of acoustic environments).
163
+
164
+ A distinctive feature of CoTA is its hybrid synthetic-real composition, where synthetic data (Multi-Speaker and Complex Audio, $14.15\%$ of total samples) is strategically incorporated to enhance complex reasoning tasks, such as multi-step logical inference in Complex Audio. Meanwhile, the majority of the dataset is derived from high-quality real-world sources (e.g., MELD for emotion recognition). By unifying tasks across 10 distinct categories, spanning from fundamental classification to advanced tasks like translation and irony detection, CoTA facilitates a hierarchical learning process—an aspect largely absent in traditional datasets constrained to simple labeling tasks.
165
+
166
+ Scalability of Reasoning Complexity. The word count distribution in the CoTA dataset highlights the model's capacity for long-chain reasoning. As shown in Figure 4, most responses fall between 300 and 500 words, allowing for nuanced reasoning and detailed logical steps, particularly in audio and music question answering. This extended length supports transparency in reasoning, ensuring a thorough exploration of complex ideas. For more demanding tasks, such as those in the Multi-Speaker dataset, responses can reach up to 1,500 words. This increase reflects the model's ability to systematically break down intricate problems, demonstrating adaptive reasoning in scenarios requiring a deep understanding of multiple interacting elements.
167
+
168
+ ![](images/c632a41b5a7f412a8c682f3bc6d71109768d4b1c43941df87876b341f545f899.jpg)
169
+
170
+ ![](images/0bd2aa1ff7c3048ba77b507121a9d0abf1df8f3b78910baab538684bec5b6143.jpg)
171
+
172
+ ![](images/0b63d5816c7d0317a054224a41f1c67e98fb31529cf58d18f1df86ff86ef0e74.jpg)
173
+
174
+ ![](images/fc8096f2ec175706b289de4b055035b882781920208ae3008150038fb4a696d5.jpg)
175
+
176
+ ![](images/2de0a197ee7589cf5a822ea1b183f833691d28cda1c03b177d528955bc25806c.jpg)
177
+
178
+ ![](images/dd3a7cdcf4bf1022e1ef3b362fc3dc39661fa746dfbd2244684f3c5d5d0bd44f.jpg)
179
+
180
+ ![](images/d6eae35fa287899c4c41075663ba130ae8f203ee0f1b708b9020787659e4450b.jpg)
181
+ Figure 4: The bar chart shows the data length distribution across nine CoTA sub-datasets, with intervals of 100 on the horizontal axis (0–1200) and proportions on the vertical axis (totaling $100\%$ ). A moving average trend line is overlaid.
182
+
183
+ ![](images/9116a5ec7b70fbbbb30739b8027d02e6fc477dfb33fb817358a70343cda2369e.jpg)
184
+
185
+ ![](images/44be2c35e1b0d98bd8e74345eeddadcfbb53fdc71670efecae6cebf702ece18c.jpg)
186
+
187
+ Conversely, simpler tasks like S2TT elicit more concise responses, typically between 100 and 200 words. This brevity prevents unnecessary elaboration, maintaining clarity and efficiency when a straightforward answer suffices. By adjusting response length based on task complexity, the model showcases its dynamic reasoning capabilities—balancing depth and precision to effectively address a wide range of analytical challenges. Sec. C introduces more evidence about CoTA's scalability of reasoning complexity.
188
+
189
+ # 4 Experiments
190
+
191
+ # 4.1 Experimental Setup
192
+
193
+ Training Details. Our model, Audio-Reasoner, is built on Qwen2-Audio-Instruct [Chu et al., 2024], comprising a total of 8.4 billion parameters. The training process was conducted using the ms-swift framework [Zhao et al., 2024b], employing supervised fine-tuning with full-parameter tuning. We used a maximum learning rate of 1e-5 and trained the model for a single epoch across the entire CoTA dataset.
194
+
195
+ Evaluation Metric. Existing evaluation datasets in the audio domain primarily focus on simple classification tasks or BLEU[Papineni et al., 2002] score-based assessments of model responses. These approaches, however, fail to comprehensively reflect the reasoning capabilities of LALMs. To address this, our evaluation methodology incorporates multiple diverse benchmarks. First, we assessed the model's accuracy on closed-form questions using the MMAU-mini subset [Sakshi et al., 2024]. This subset was specifically chosen since the model's training data does not include multiple-choice questions. Additionally, we evaluated its real-world conversational abilities using the chat and foundation sections of AIR-Bench [Yang et al., 2024b]. These benchmarks encompass various audio modalities, including sound, speech, and music. Beyond these, we also tested the model's performance on traditional audio-related tasks such as speech-to-text translation (S2TT) on CoVoST 2 [Wang et al., 2021] and speech emotion recognition (SER) on MELD dataset [Poria et al., 2019]. A comprehensive summary of the evaluation tasks and datasets is presented in Table 3.
196
+
197
+ Baselines: We primarily select state-of-the-art large audio language models as the baselines for comparison. These include the closed-source models Gemini-1.5-pro [Team et al., 2024], GPT-4o [Hurst et al., 2024], Qwen-audio-turbo [Chu et al., 2023], as well as the open-source models SALMONN [Tang et al., 2023], Qwen-Audio-Chat [Chu et al., 2023], and Qwen2-Audio-Instruct [Chu et al., 2024], which also serves as the base model. Additionally, we compared cascade model approaches such as Whisper [Radford et al., 2023] + GPT-4 [Achiam et al., 2023] and a series of mainstream multimodal large language models. [Gong et al., 2023b,a, Kong et al., 2024, Ghosh et al., 2024, Liu et al., 2024b, Su et al., 2023, Wu et al., 2024, Wang et al., 2023, Zhang et al., 2023a]
198
+
199
+ Table 3: Summary of evaluation benchmarks for Audio-Reasoner.
200
+
201
+ <table><tr><td>Dataset</td><td>Split</td><td>Task</td><td>Metric</td></tr><tr><td rowspan="3">MMAU-mini</td><td>Sound</td><td>Sound QA</td><td>ACC</td></tr><tr><td>Speech</td><td>Speech QA</td><td>ACC</td></tr><tr><td>Music</td><td>Music QA</td><td>ACC</td></tr><tr><td rowspan="9">AIR-Bench</td><td>Chat-Sound</td><td>Sound QA</td><td>GPT-4 Eval</td></tr><tr><td>Chat-Speech</td><td>Speech QA</td><td>GPT-4 Eval</td></tr><tr><td>Chat-Music</td><td>Music QA</td><td>GPT-4 Eval</td></tr><tr><td>Chat-MixedAudio</td><td>Complex Audio QA</td><td>GPT-4 Eval</td></tr><tr><td>Foundation-SoundAQA</td><td>Sound QA</td><td>ACC</td></tr><tr><td>Foundation-SER</td><td>Speech QA</td><td>ACC</td></tr><tr><td>Foundation-SIC</td><td>Speaker Intent Classification</td><td>ACC</td></tr><tr><td>Foundation-SNV</td><td>Speaker Number Verification</td><td>ACC</td></tr><tr><td>Foundation-MusicAQA</td><td>Music QA</td><td>ACC</td></tr><tr><td>CoVoST 2</td><td>Test</td><td>Speech-to-Text Translation (S2TT)</td><td>BLEU</td></tr><tr><td>MELD</td><td>Test</td><td>Speech Emotion Recognition (SER)</td><td>ACC</td></tr></table>
202
+
203
+ # 4.2 Main Results
204
+
205
+ To evaluate the effectiveness of Audio-Reasoner, we compare its performance against both closed-source and open-source baselines on benchmark datasets of MMAU-mini and AIR-Bench chat/foundation, CoVoST 2 (zn/en subset) and MELD. The results in Tables 4, 5, 6, 7 and 8 clearly demonstrate that Audio-Reasoner significantly outperforms existing models, setting a new state-of-the-art in audio reasoning tasks.
206
+
207
+ Table 4: Performance comparison on MMAU-mini. The $\{\mathbf{so},\mathbf{mu},\mathbf{sp}\}$ indicates whether "sound", "music", and "speech" have been used in training.
208
+
209
+ <table><tr><td>Model</td><td>Size</td><td>{so, mu, sp}</td><td>Sound</td><td>Music</td><td>Speech</td><td>Avg</td></tr><tr><td colspan="7">Closed-Source</td></tr><tr><td>gpt4o + caption</td><td>-</td><td>---</td><td>63.36</td><td>60.77</td><td>53.15</td><td>57.30</td></tr><tr><td>gemini-1.5-pro</td><td>-</td><td>---</td><td>56.75</td><td>49.40</td><td>58.55</td><td>54.90</td></tr><tr><td colspan="7">Open-Source</td></tr><tr><td>LTU</td><td>7B</td><td>Y Y N</td><td>22.52</td><td>9.69</td><td>17.71</td><td>16.89</td></tr><tr><td>LTU-AS</td><td>7B</td><td>Y Y Y</td><td>23.35</td><td>9.10</td><td>20.60</td><td>17.68</td></tr><tr><td>Audio Flamingo - Chat</td><td>2.2B</td><td>Y Y N</td><td>23.42</td><td>15.26</td><td>11.41</td><td>16.69</td></tr><tr><td>GAMA</td><td>7B</td><td>Y Y N</td><td>41.44</td><td>32.33</td><td>18.91</td><td>30.90</td></tr><tr><td>GAMA-IT</td><td>7B</td><td>Y Y N</td><td>43.24</td><td>28.44</td><td>18.91</td><td>30.20</td></tr><tr><td>MU-LLaMA</td><td>7B</td><td>N Y N</td><td>40.84</td><td>32.63</td><td>22.22</td><td>31.90</td></tr><tr><td>SALMONN</td><td>13B</td><td>Y Y Y</td><td>41.00</td><td>34.80</td><td>25.50</td><td>33.70</td></tr><tr><td>Qwen-audio-Chat</td><td>8.4B</td><td>Y Y Y</td><td>55.25</td><td>44.00</td><td>30.03</td><td>43.10</td></tr><tr><td>Qwen2-Audio-Instruct</td><td>8.4B</td><td>Y Y Y</td><td>54.95</td><td>50.98</td><td>42.04</td><td>49.20</td></tr><tr><td colspan="7">Ours</td></tr><tr><td>Audio-Reasoner</td><td>8.4B</td><td>Y Y Y</td><td>60.06</td><td>64.30</td><td>60.70</td><td>61.71</td></tr></table>
210
+
211
+ Performance on MMAU-mini. MMAU-mini in Table 4 assesses multimodal audio understanding across three major domains: sound, music, and speech. We first make a comparison with closed-source models. Audio-Reasoner achieves the highest overall score (61.71%), outperforming GPT-4o (57.30%) and Gemini-1.5-Pro (54.90%). Music reasoning shows the most significant improvement—Audio-Reasoner: 64.30%, GPT-4o: 60.77%, and Gemini-1.5-Pro: 49.40%. This indicates superior
212
+
213
+ Table 5: Performance comparison on AIR-Bench chat benchmark.
214
+
215
+ <table><tr><td>Model</td><td>Sound</td><td>Music</td><td>Speech</td><td>Mixed Audio</td><td>Average</td></tr><tr><td colspan="6">Closed-Source</td></tr><tr><td>Whisper+GPT4</td><td>-</td><td>-</td><td>7.54</td><td>-</td><td>7.54</td></tr><tr><td>Qwen-Audio-Turbo</td><td>6.59</td><td>5.98</td><td>7.04</td><td>5.77</td><td>6.34</td></tr><tr><td>Gemini-1.5-pro</td><td>5.49</td><td>5.06</td><td>6.97</td><td>5.27</td><td>5.70</td></tr><tr><td colspan="6">Open-Source</td></tr><tr><td>Macaw-LLM</td><td>1.01</td><td>0.91</td><td>0.97</td><td>1.00</td><td>1.01</td></tr><tr><td>SpeechGPT</td><td>0.95</td><td>0.95</td><td>1.57</td><td>1.14</td><td>1.15</td></tr><tr><td>Next-gpt</td><td>4.76</td><td>4.18</td><td>3.86</td><td>2.92</td><td>4.13</td></tr><tr><td>Pandagpt</td><td>5.46</td><td>5.06</td><td>3.58</td><td>2.93</td><td>4.25</td></tr><tr><td>BLSP</td><td>5.55</td><td>5.08</td><td>6.17</td><td>4.52</td><td>5.33</td></tr><tr><td>Qwen-Audio</td><td>6.95</td><td>5.52</td><td>6.47</td><td>5.38</td><td>6.08</td></tr><tr><td>SALMONN</td><td>6.28</td><td>5.95</td><td>6.16</td><td>6.08</td><td>6.11</td></tr><tr><td>Qwen2-Audio-Instruct</td><td>6.99</td><td>6.79</td><td>7.18</td><td>6.77</td><td>6.93</td></tr><tr><td colspan="6">Ours</td></tr><tr><td>Audio-Reasoner</td><td>7.68</td><td>8.05</td><td>8.19</td><td>6.65</td><td>7.94</td></tr></table>
216
+
217
+ musical structure comprehension, enabled by CoTA's diverse music-based tasks. Speech-based reasoning is also notably strong—Audio-Reasoner: $60.70\%$ , GPT-4o: $53.15\%$ and Gemini-1.5-Pro: $58.55\%$ . This validates CoTA's impact in training models for context-dependent spoken language understanding. In comparison with open-source models, Audio-Reasoner surpasses all open-source models, with the next-best, Qwen2-Audio-Instruct, trailing by 12.51 percentage points $(49.20\%)$ . Across individual domains, Audio-Reasoner achieves $60.06\%$ in sound reasoning (beating Qwen2-Audio's $54.95\%$ ), $64.30\%$ in music (outperforming Qwen2-Audio's $50.98\%$ ), and $60.70\%$ in speech (exceeding Qwen2-Audio's $42.04\%$ ).
218
+
219
+ Performance on AIR-Bench chat. (1) chat benchmark. AIR-Bench chat in Table 5 evaluates contextual and conversational reasoning across four audio types: sound, music, speech, and mixed audio. Regarding closed-source models, Audio-Reasoner achieves the highest overall score (7.94), outperforming Gemini-1.5-Pro (5.70) and Whisper+GPT-4 (7.54). It shows the most significant improvements in music (8.05) and speech (8.19). Additionally, its mixed audio reasoning score (6.65) demonstrates proficiency in handling multi-source audio tasks. As for comparison with open-source models, Audio-Reasoner sets a new benchmark, surpassing Qwen2-Audio (6.93) by 1.01 points. Across domains, it achieves 7.68 in sound (beating Qwen2-Audio's 6.99), 8.05 in music (exceeding Qwen2-Audio's 6.79), and 8.19 in speech (outperforming Qwen2-Audio's 7.18), showcasing balanced expertise.
220
+
221
+ Table 6: Performance comparison on AIR-Bench foundation benchmark.
222
+
223
+ <table><tr><td rowspan="2">Model</td><td>AIR-Sound</td><td>AIR-Music</td><td colspan="3">AIR-Speech</td><td rowspan="2">Average</td></tr><tr><td>SoundAQA</td><td>MusicAQA</td><td>SER</td><td>SIC</td><td>SNV</td></tr><tr><td>Closed-Source</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>whisper+GPT4</td><td>-</td><td>-</td><td>59.5</td><td>87.7</td><td>30.0</td><td>59.1</td></tr><tr><td>Qwen-Audio-Turbo</td><td>62.8</td><td>62.5</td><td>60.0</td><td>56.4</td><td>54.3</td><td>59.2</td></tr><tr><td>Open-Source</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>NEXT-GPT</td><td>18.8</td><td>47.1</td><td>25.7</td><td>25.6</td><td>25.4</td><td>28.5</td></tr><tr><td>SpeechGPT</td><td>33.9</td><td>31.3</td><td>37.6</td><td>45.8</td><td>32.6</td><td>36.2</td></tr><tr><td>BLSP</td><td>36.1</td><td>31.0</td><td>27.4</td><td>46.6</td><td>28.1</td><td>33.8</td></tr><tr><td>PandaGPT</td><td>48.7</td><td>50.7</td><td>26.0</td><td>28.5</td><td>43.2</td><td>39.4</td></tr><tr><td>SALMONN</td><td>28.4</td><td>54.6</td><td>29.9</td><td>36.7</td><td>34.3</td><td>36.8</td></tr><tr><td>Qwen-Audio-Chat</td><td>64.6</td><td>48.2</td><td>43.2</td><td>77.8</td><td>35.3</td><td>53.8</td></tr><tr><td>Ours</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Audio-Reasoner</td><td>65.7</td><td>55.2</td><td>60.5</td><td>88.1</td><td>56.3</td><td>65.2</td></tr></table>
224
+
225
+ (2) foundation benchmark. AIR-Bench foundation in Table 6 evaluates fundamental audio understanding across three primary categories: sound, music, and speech, with speech further divided into three subdomains: Speech Emotion Recognition (SER), Speaker Intent Classification (SIC), and Speaker Number Verification (SNV). Audio-Reasoner achieves the highest overall score (65.2), outperforming both closed-source and open-source baselines. Compared to the strongest closed-source model, Qwen-Audio-Turbo (59.2), Audio-Reasoner leads by 6.0 points, demonstrating superior reasoning across all audio domains. Within the sound category, Audio-Reasoner attains 65.7, surpassing Qwen-Audio-Chat (64.6) and Qwen-Audio-Turbo (62.8), highlighting its strong ability in environmental and non-speech audio understanding. For music reasoning, Audio-Reasoner achieves 55.2, significantly outperforming Qwen-Audio-Chat (48.2) and all other open-source baselines, indicating better comprehension of musical structures and attributes. Regarding speech reasoning, Audio-Reasoner sets new state-of-the-art results across all subdomains. It attains 60.5 in SER (vs. Qwen-Audio-Turbo's 60.0), 88.1 in SIC (surpassing Whisper+GPT-4's 87.7), and 56.3 in SNV (exceeding Qwen-Audio-Turbo's 54.3). The substantial lead in SIC showcases its exceptional speaker intent understanding capability, benefiting from CoTA's step-by-step reasoning process.
226
+
227
+ Table 7: Performance comparison of the speech-to-text translation (S2TT) task on CoVoST 2 dataset. We consider the mutual conversion between Chinese and English as training and evaluation data.
228
+
229
+ <table><tr><td rowspan="2">Model</td><td colspan="5">EN-ZN</td><td colspan="5">ZN-EN</td><td rowspan="2">Avg</td></tr><tr><td>BLEU1</td><td>BLEU2</td><td>BLEU3</td><td>BLEU4</td><td>Avg</td><td>BLEU1</td><td>BLEU2</td><td>BLEU3</td><td>BLEU4</td><td>Avg</td></tr><tr><td colspan="12">Closed-Source</td></tr><tr><td>Gemini-1.5-pro</td><td>68.25</td><td>49.12</td><td>37.81</td><td>29.79</td><td>46.24</td><td>51.83</td><td>26.61</td><td>16.27</td><td>10.88</td><td>26.39</td><td>36.32</td></tr><tr><td colspan="12">Open-Source</td></tr><tr><td>Qwen2-Audio-Instruct</td><td>58.63</td><td>39.55</td><td>28.71</td><td>21.40</td><td>37.07</td><td>48.52</td><td>24.31</td><td>14.65</td><td>9.24</td><td>24.18</td><td>30.63</td></tr><tr><td colspan="12">Ours</td></tr><tr><td>Audio-Reasoner</td><td>72.89</td><td>54.17</td><td>42.46</td><td>33.95</td><td>50.87</td><td>56.50</td><td>29.99</td><td>18.37</td><td>11.62</td><td>29.13</td><td>40.00</td></tr></table>
230
+
231
+ Performance on CoVoST 2. The CoVoST 2 dataset in Table 7 evaluates speech-to-text translation, a fundamental task in cross-lingual speech understanding. Audio-Reasoner demonstrates the strengths of Audio-Reasoner in speech-to-text translation across both English-to-Chinese (EN-ZN) and Chinese-to-English (ZN-EN) tasks.
232
+
233
+ For EN-ZN translation, Audio-Reasoner outperforms both closed-source Gemini-1.5-pro and open-source Qwen2-Audio-Instruct. With an average BLEU score of 50.87, it surpasses Gemini-1.5-pro's score of 46.24 by 4.63 points and Qwen2-Audio-Instruct's 37.07 by a significant 13.80 points. Audio-Reasoner's BLEU-4 score of 33.95 highlights its ability to generate fluent, high-quality translations, even for more complex sentence structures. In the ZN-EN translation task, Audio-Reasoner continues to show superiority with an average BLEU score of 29.13, outperforming Gemini-1.5-pro (with a score of 26.39) by 2.74 points and Qwen2-Audio-Instruct (with a score of 24.18) by 4.95 points. Its BLEU-4 score of 11.62 reflects an enhanced ability to produce coherent and accurate translations, particularly in more challenging, longer sentences.
234
+
235
+ These results demonstrate that Audio-Reasoner excels at capturing cross-lingual semantic alignment, consistently outperforming existing models in speech-to-text translation tasks.
236
+
237
+ Table 8: Performance comparison of the speech emotion recognition (SER) task on MELD dataset.
238
+
239
+ <table><tr><td>Model</td><td>Unweighted_ACC</td></tr><tr><td>EMO-box</td><td>31.5</td></tr><tr><td>SALMONN</td><td>39.2</td></tr><tr><td>Qwen2-Audio-Instruct</td><td>49.9</td></tr><tr><td>Audio-Reasoner</td><td>53.9</td></tr></table>
240
+
241
+ Performance on MELD. The MELD dataset in Table 8 evaluates speech emotion recognition, a crucial task for understanding affective states in spoken language. Audio-Reasoner achieves the highest unweighted accuracy of 53.9, outperforming all baseline models, including the previous state-of-the-art Qwen2-Audio-Instruct (49.9) by 4.0 percentage points. The results further validate that Audio-Reasoner excels in emotion perception and contextual audio understanding, solidifying its effectiveness in real-world speech emotion recognition tasks.
242
+
243
+ ![](images/9322ca54bc2fde2f6f2c9939c1e41fb674388081d73e82a1630c156cc78330a8.jpg)
244
+ Figure 5: Example of Audio-Reasoner answering music-based question.
245
+
246
+ Analyze the interplay between the guitar riff and the spoken commentary. How do they contribute to the overall impression or potential purpose of the piece?
247
+
248
+ # 1
249
+
250
+ ![](images/2037f12e5111931657463be8d6a3644c6f01c10ec8a126d8d0536b94ae5f6035.jpg)
251
+
252
+ # <THINK>
253
+
254
+ <PLANNING> The question asks about interplay and contribution. Analyze the music clip, focusing on: Characterizing the guitar riff: style, rhythm, melodic features. Characterizing the spoken commentary: tone, content, frequency. Analyzing their relationship: explain, praise, interrupt. Inferring the overall impression and purpose: instruction, demonstration, etc.? </PLANNING>
255
+
256
+ <Caption> The audio has a blues-rock guitar riff on a clean-toned electric guitar, moderate tempo, 4/4 time. There's male spoken commentary, informal and relaxed, suggesting instruction or informal performance.</Caption>
257
+
258
+ <REASONING> Consider scenarios:
259
+
260
+ **Scenario 1: Instructional Context**
261
+
262
+ If commentary explains riff aspects (e.g., "bend on third string", "E minor blues progression"), it's instructional. Guitar demonstrates, commentary clarifies. If it gives positive reinforcement ("very cool riff, man"), it's a casual lesson or performance with teaching.
263
+
264
+ **Scenario 2: Informal Performance/Demonstration**
265
+
266
+ If commentary is minimal (reactions like "yeah", "alright"), it's an informal performance. The riff's simplicity and repetition suggest a demonstration for less-experienced musicians.
267
+
268
+ The audio leans towards scenario 1. "very cool riff, man" implies positive assessment for teaching or demonstration. Clean tone and moderate pace suggest instruction/demonstration, not a raw blues performance.
269
+
270
+ Refining: E minor blues with E-B chord progression and C# implies commentary helps demonstrate their use. </REASONING>
271
+
272
+ <SUMMARY> The blues-rock, moderate-tempo guitar riff is complemented by relaxed, informal spoken commentary. The interplay indicates a purpose of instruction or demonstration of the riff's construction. The positive comment supports an informal teaching or relaxed demonstration.</SUMMARY>
273
+
274
+ # </THINK>
275
+
276
+ <RESPONSE>
277
+
278
+ The interplay creates an impression of informal instruction or demonstration. The clean-toned guitar plays a moderate-tempo blues-rock riff, and the male voice gives relaxed commentary like "very cool riff, man". The riff's simplicity, moderate pace, and E minor blues elements suggest the purpose is to demonstrate the riff and how to play its features, not a formal performance or raw blues.
279
+
280
+ </RESPONSE>
281
+
282
+ Why Audio-Reasoner Excels. While open-source models typically lag behind closed-source alternatives, Audio-Reasoner narrows this gap significantly. It outperforms Gemini-1.5-Pro in multiple categories and achieves competitive results against GPT-4o. A key factor driving Audio-Reasoner's state-of-the-art performance is the CoTA dataset, which provides an unparalleled foundation for comprehensive, structured, and context-aware audio reasoning. The dataset contributes to Audio-Reasoner's success in two critical ways: (1) CoTA's comprehensive audio reasoning data. Unlike models trained on fragmented or task-specific datasets, Audio-Reasoner benefits from CoTA's extensive and balanced coverage of sound, speech, and music-based reasoning. This ensures robust adaptation across diverse audio tasks, enhancing its ability to generalize across multimodal benchmarks. (2) CoTA's strong logical and contextual inference. CoTA's emphasis on structured reasoning and contextual awareness enables Audio-Reasoner to outperform existing models in long-form reasoning (MMAU-mini) and conversational audio understanding (AIR-Bench chat). By leveraging deep logical inference and nuanced context retention, it surpasses prior approaches in handling complex speech interactions and multimodal reasoning.
283
+
284
+ These strengths position Audio-Reasoner as a breakthrough in open-source audio intelligence, setting a new benchmark for multimodal reasoning in speech comprehension, translation, and real-world audio interpretation.
285
+
286
+ # 4.3 Case Study
287
+
288
+ Here we show a case in Figure 5, demonstrating the audio-based reasoning capability of Audio-Reasoner. The system analyzes the interplay between a guitar riff and a spoken commentary, systematically breaking down their characteristics and relationship. It identifies key musical features, assesses the commentary's tone and intent, and infers the overall purpose of the piece. By considering different scenarios, Audio-Reasoner determines that the interplay suggests an informal instructional or demonstrative context rather than a formal performance. This example highlights the model's ability to extract meaningful insights from audio, combining musical analysis with contextual interpretation.
289
+
290
+ For more remarkable examples, please refer to Sec. D.
291
+
292
+ # 5 Conclusion
293
+
294
+ In this work, we introduced Audio-Reasoner, a large audio language model (LALM) designed to advance deep reasoning in audio-based tasks. By leveraging inference scaling and structured chain-of-thought (CoT) reasoning, we demonstrated significant performance improvements across key benchmarks. Central to our approach is CoTA, a large-scale, high-quality dataset containing around 1.2 million structured reasoning samples, which we generated through a systematic pipeline of annotation refinement, question synthesis, and CoT generation.
295
+
296
+ Our experimental results highlight the effectiveness of structured reasoning in the audio domain, achieving state-of-the-art performance on MMAU-mini $(+25.04\%)$ , CoVoST 2 $(+8.31\%)$ , and MELD $(+8.01\%)$ . These findings underscore the critical role of reasoning-rich datasets and inference scaling in multimodal learning, particularly for audio-based tasks where existing models struggle with complex reasoning.
297
+
298
+ Moving forward, we believe that structured CoT methodologies will play a crucial role in enhancing the reasoning capabilities of LALMs. Future research directions include extending Audio-Reasoner's capabilities to multi-turn dialogue reasoning, cross-modal knowledge integration, and real-world applications in speech-driven AI systems. By publicly releasing our model, dataset, and code, we aim to foster further innovation and collaboration in the field of audio reasoning, paving the way for more advanced and interpretable LALMs.
299
+
300
+ # References
301
+
302
+ Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
303
+ Cameron B Browne, Edward Powley, Daniel Whitehouse, Simon M Lucas, Peter I Cowling, Philipp Rohlfshagen, Stephen Tavener, Diego Perez, Spyridon Samothrakis, and Simon Colton. A survey of monte carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in Games (T-CIAIG), 4(1):1-43, 2012.
304
+ Santiago Castro, Devamanyu Hazarika, Verónica Pérez-Rosas, Roger Zimmermann, Rada Mihalcea, and Soujanya Poria. Towards multimodal sarcasm detection. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 4619-4629, 2019.
305
+ Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023.
306
+ Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, et al. Qwen2-audio technical report. arXiv preprint arXiv:2407.10759, 2024.
307
+ Yingqian Cui, Pengfei He, Xianfeng Tang, Qi He, Chen Luo, Jiliang Tang, and Yue Xing. A theoretical understanding of chain-of-thought: Coherent reasoning and error-aware demonstration. arXiv preprint arXiv:2410.16540, 2024.
308
+ Alexandre Defossez, Laurent Mazare, Manu Orsini, Amelie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024.
309
+ Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024.
310
+ Konstantinos Drossos, Samuel Lipping, and Tuomas Virtanen. Clotho: An audio captioning dataset. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 736-740, 2020.
311
+ Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. arXiv preprint arXiv:2412.10117, 2024.
312
+
313
+ Logan Engstrom, Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Firdaus Janoos, Larry Rudolph, and Aleksander Madry. Implementation matters in deep policy gradients: A case study on ppo and trpo. arXiv preprint arXiv:2005.12729, 2020.
314
+ Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong Li Lee, and Wynne Hsu. Video-of-thought: step-by-step video reasoning from perception to cognition. In International Conference on Machine Learning (ICML), pages 13109-13125, 2024.
315
+ Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957, 2025.
316
+ Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780. IEEE, 2017.
317
+ Sreyan Ghosh, Sonal Kumar, Ashish Seth, Chandra Kiran Reddy Evuru, Utkarsh Tyagi, S Sakshi, Oriol Nieto, Ramani Duraiswami, and Dinesh Manocha. Gama: A large audio-language model with advanced audio understanding and complex reasoning abilities. In Empirical Methods in Natural Language Processing, pages 6288-6313, 2024.
318
+ Yuan Gong, Alexander H Liu, Hongyin Luo, Leonid Karlinsky, and James Glass. Joint audio and speech understanding. In Automatic Speech Recognition and Understanding Workshop (ASRU), pages 1-8, 2023a.
319
+ Yuan Gong, Hongyin Luo, Alexander H Liu, Leonid Karlinsky, and James Glass. Listen, think, and understand. arXiv preprint arXiv:2305.10790, 2023b.
320
+ Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.
321
+ Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. Mammoth-vl: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237, 2024.
322
+ Songhao Han, Wei Huang, Hairong Shi, Le Zhuo, Xiu Su, Shifeng Zhang, Xu Zhou, Xiaojuan Qi, Yue Liao, and Si Liu. Videospresso: A large-scale chain-of-thought dataset for fine-grained video reasoning via core frame selection. arXiv preprint arXiv:2411.14794, 2024.
323
+ Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.
324
+ Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.
325
+ Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.
326
+ Feihu Jin, Yifan Liu, and Ying Tan. Zero-shot chain-of-thought reasoning guided by evolutionary algorithms in large language models. arXiv preprint arXiv:2402.05376, 2024.
327
+ Chris Dongjoo Kim, Byeongchang Kim, Hyunmin Lee, and Gunhee Kim. Audiocaps: Generating captions for audios in the wild. In *Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL)*, pages 119-132, 2019.
328
+ Zhifeng Kong, Arushi Goel, Rohan Badlani, Wei Ping, Rafael Valle, and Bryan Catanzaro. Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities. In International Conference on Machine Learning (ICML), pages 25125-25148, 2024.
329
+
330
+ Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.
331
+ Shansong Liu, Atin Sakkeer Hussain, Chenshuo Sun, and Ying Shan. Music understanding llama: Advancing text-to-music generation with question answering and captioning. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 286-290, 2024b.
332
+ Ziyang Ma, Mingjie Chen, Hezhao Zhang, Zhisheng Zheng, Wenxi Chen, Xiquan Li, Jiaxin Ye, Xie Chen, and Thomas Hain. Emobox: Multilingual multi-corpus speech emotion recognition toolkit and benchmark. arXiv preprint arXiv:2406.07162, 2024.
333
+ Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025.
334
+ Jan Melechovsky, Zixun Guo, Deepanway Ghosal, Navonil Majumder, Dorien Herremans, and Soujanya Poria. Mustango: Toward controllable text-to-music generation. In *Nations of the Americas Chapter of the Association for Computational Linguistics (NAACL)*, pages 8286-8309, 2024.
335
+ Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.
336
+ Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: an asr corpus based on public domain audio books. In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5206-5210, 2015.
337
+ Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 311-318, 2002.
338
+ Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. Meld: A multimodal multi-party dataset for emotion recognition in conversations. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 527-536, 2019.
339
+ Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, et al. Toolllm: Facilitating large language models to master 16000+ real-world apis. arXiv preprint arXiv:2307.16789, 2023.
340
+ Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International Conference on Machine Learning (ICML), pages 28492-28518, 2023.
341
+ Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Advances in Neural Information Processing Systems (NeurIPS), pages 53728-53741, 2023.
342
+ S Sakshi, Utkarsh Tyagi, Sonal Kumar, Ashish Seth, Ramaneswaran Selvakumar, Oriol Nieto, Ramani Duraiswami, Sreyan Ghosh, and Dinesh Manocha. Mmau: A massive multi-task audio understanding and reasoning benchmark. In International Conference on Learning Representations (ICLR), 2024.
343
+ Hao Shao, Shengju Qian, Han Xiao, Guanglu Song, Zhuofan Zong, Letian Wang, Yu Liu, and Hongsheng Li. Visual cot: Unleashing chain-of-thought reasoning in multi-modal language models. arXiv preprint arXiv:2403.16999, 2024a.
344
+ Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024b.
345
+
346
+ Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024.
347
+ Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In Advances in Neural Information Processing Systems (NeurIPS), pages 29106-29141, 2024.
348
+ Yixuan Su, Tian Lan, Huayang Li, Jialu Xu, Yan Wang, and Deng Cai. Pandagpt: One model to instruction-follow them all. In Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants (TLLM), pages 11-23, 2023.
349
+ Changli Tang, Wenyi Yu, Guangzhi Sun, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, and Chao Zhang. Salmonn: Towards generic hearing abilities for large language models. arXiv preprint arXiv:2310.13289, 2023.
350
+ Yunlong Tang, Gen Zhan, Li Yang, Yiting Liao, and Chenliang Xu. Cardiff: Video salient object ranking chain of thought reasoning for saliency prediction with diffusion. arXiv preprint arXiv:2408.12009, 2024.
351
+ Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.
352
+ Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.
353
+ Miles Turpin, Julian Michael, Ethan Perez, and Samuel Bowman. Language models don't always say what they think: Unfaithful explanations in chain-of-thought prompting. In Advances in Neural Information Processing Systems (NeurIPS), pages 74952-74965, 2023.
354
+ Changhan Wang, Anne Wu, Jiatao Gu, and Juan Pino. Covost 2 and massively multilingual speech translation. In Conference of the International Speech Communication Association (Interspeech), pages 2247-2251, 2021.
355
+ Chen Wang, Minpeng Liao, Zhongqiang Huang, Jinliang Lu, Junhong Wu, Yuchen Liu, Chengqing Zong, and Jiajun Zhang. Blsp: Bootstrapping language-speech pre-training via behavior alignment of continuation writing. arXiv preprint arXiv:2309.00916, 2023.
356
+ Yan Wang, Yawen Zeng, Jingsheng Zheng, Xiaofen Xing, Jin Xu, and Xiangmin Xu. Videocot: A video chain-of-thought dataset with active annotation tool. In Workshop on Advances in Language and Vision Research (ALVR), pages 92-101, 2024.
357
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 24824-24837, 2022.
358
+ Peng Wen, Teng-Gen Hu, Robert J Linhardt, Sen-Tai Liao, Hong Wu, and Yu-Xiao Zou. Mulberry: A review of bioactive compounds and advanced processing technology. Trends in food science & technology, 83:138-158, 2019.
359
+ Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. In International Conference on Machine Learning (ICML), pages 53366-53397, 2024.
360
+ Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024a.
361
+ Zhifei Xie and Changqiao Wu. Mini-omni2: Towards open-source gpt-4o with vision, speech and duplex capabilities. arXiv preprint arXiv:2410.11190, 2024b.
362
+ Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024.
363
+
364
+ An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024a.
365
+ Qian Yang, Jin Xu, Wenrui Liu, Yunfei Chu, Ziyue Jiang, Xiaohuan Zhou, Yichong Leng, Yuanjun Lv, Zhou Zhao, Chang Zhou, et al. Air-bench: Benchmarking large audio-language models via generative comprehension. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 1979-1998, 2024b.
366
+ Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Advances in Neural Information Processing Systems (NeurIPS), pages 11809-11822, 2023.
367
+ Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. Token-level direct preference optimization. In International Conference on Machine Learning (ICML), pages 58348-58365, 2024.
368
+ Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. arXiv preprint arXiv:2305.11000, 2023a.
369
+ Dong Zhang, Shimin Li, Xin Zhang, Jun Zhan, Pengyu Wang, Yaqian Zhou, and Xipeng Qiu. Speechgpt: Empowering large language models with intrinsic cross-modal conversational abilities. In Empirical Methods in Natural Language Processing (EMNLP), pages 15757-15773, 2023b.
370
+ Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024a.
371
+ Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024b.
372
+ Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola. Automatic chain of thought prompting in large language models. arXiv preprint arXiv:2210.03493, 2022.
373
+ Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024a.
374
+ Yuze Zhao, Jintao Huang, Jinghan Hu, Xingjun Wang, Yunlin Mao, Daoze Zhang, Zeyinzi Jiang, Zhikai Wu, Baole Ai, Ang Wang, et al. Swift: a scalable lightweight infrastructure for fine-tuning. arXiv preprint arXiv:2408.05517, 2024b.
375
+ Qiji Zhou, Ruochen Zhou, Zike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. arXiv preprint arXiv:2405.13872, 2024.
376
+ Anni Zou, Zhuosheng Zhang, Hai Zhao, and Xiangru Tang. Generalizable chain-of-thought prompting in mixed-task scenarios with large language models. arXiv preprint arXiv:2310.06692, 2023.
377
+
378
+ # A Prompt Details
379
+
380
+ A universally applicable method for writing prompts involves three key components: a clear task definition, a structured example, and a precise format specification. Our prompt adheres to this methodology by first defining the task explicitly, outlining the need for detailed audio descriptions and progressively challenging questions. It then provides a structured example that demonstrates the expected output format, ensuring clarity and minimizing ambiguity. Lastly, it specifies the exact formatting rules using delimiters such as <caption>...</caption> and <question1>...</question1>, ensuring consistency in responses. This approach guarantees efficiency by eliminating interpretative variance, allowing for precise and reproducible outputs. When drafting this prompt, we adhered to a structured approach to maximize clarity and effectiveness. The first-person perspective is used to emphasize our direct involvement in designing the task, ensuring the reader understands the rationale behind each structural choice. The structure follows a logical progression: we begin by introducing the general method, transition into an explanation of how our prompt aligns with this method, and conclude by justifying the approach's efficiency. By maintaining an academic tone, we reinforce the credibility and rigor of our prompt-writing methodology. We list some of the prompts used for tasks at different stages following below.
381
+
382
+ # A.1 Prompt of Stage 1 when Processing Data (Sample from AudioSet)
383
+
384
+ We are annotating some audio and designing some questions. You are an excellent audio analyst. Next, you will receive an audio and one absolutely correct but simple description. Your task is to first generate a more detailed, in-depth and absolutely correct new description based on the given descriptions. Then, use this description to generate three open-ended or single-choice questions with four options along with their answers. Please separate different parts using <caption>...</caption>
385
+ <question1><question>...</question>
386
+ <answer>...</answer></question1>
387
+ <question2>
388
+ <question>...</question>
389
+ <answer>...</answer></question2>.
390
+
391
+ Here is a sample. Please strictly follow the format in the sample. <caption>The audio presents a sustained, high-frequency static noise, characteristic of a detuned or malfunctioning electronic device, likely a television or radio...</caption><question1><question>Describe the characteristics of the static noise in the audio, and how these characteristics change over time.</question><answer>...</answer></question1><question2><question>What...?</question><answer>...</answer></question2><question3><question>What...?</question><answer>...</answer></question3>
392
+
393
+ Here is the original description: *** label here ***.
394
+
395
+ Here is the audio.
396
+
397
+ Please design three questions that gradually become more challenging, starting from basic factual questions, but don't deviate from the content of the audio itself. If it's a single-choice question, please give four options like (a) one, (b) two, .... and the answer should be analyzed and end with a format like "the answer is B."
398
+
399
+ # A.2 Prompt of Stage 2 when Processing Data (Sample from AudioSet)
400
+
401
+ We are now designing a system to generate structured audio-based chain-of-thought reasoning data. You will receive an audio clip, its textual description, as well as a question and its answer. Your task is to explore in more detail the thinking process from the question to the answer. Your chain of thought will be divided into two main parts, the <think> and <response> sections. In the <think> section, you need to go through four steps: planning, captioning, reasoning, and summarizing. The <think> section is invisible to the user. Therefore, in the <response> section, you need to base on all the reasoning processes and results in the <think> section and provide a final reply based on the question. Finally, your reply should strictly follow the following format: <THINK><PLANNING> (In this part, analyze the user's needs and how to complete the task. If the problem is complex, it should be done step by step) </PLANNING><CAPTION> (In this part, conduct an overall analysis of the given audio input content, try to find all the parts related to the question, describe them, and ensure it is completely correct.) </CAPTION><REASONING> (Start reasoning towards the final answer here) </REASONING><SUMMARY> (Draw appropriate results based on the reasoning part here) </SUMMARY></THINK><RESPONSE> Give the final answer here referring to the <THINK> part </RESPONSE> Please strictly follow the format of the sample.
402
+
403
+ Sample:
404
+
405
+ <THINK>
406
+
407
+ <PLANNING>
408
+
409
+ The user wants to understand the dynamic changes within the provided audio clip ....
410
+
411
+ </PLANNING>
412
+
413
+ <CAPTION>
414
+
415
+ The audio clip predominantly features static noise. .... similar to that of a detuned television or a device failing to receive a signal.
416
+
417
+ </CAPTION>
418
+
419
+ <REASONING>
420
+
421
+ 1. Identify changes in Intensity (Volume): The audio's static noise does not remain at a constant volume. There are noticeable increases and decreases in loudness throughout the clip. .... is struggling to maintain a consistent output, adding to the impression of something malfunctioning or broken.
422
+
423
+ </REASONING>
424
+
425
+ <SUMMARY>
426
+
427
+ The static noise in the audio is highly dynamic. .... leading to a sense of disorder and instability.
428
+
429
+ </SUMMARY>
430
+
431
+ </THINK>
432
+
433
+ <RESPONSE>
434
+
435
+ The audio presents a static noise, .... The overall effect is one of energetic chaos, preventing any possibility of calm or predictability.
436
+
437
+ </RESPONSE>
438
+
439
+ Note that you have both the question and the answer because it is necessary to ensure the correctness of the chain of thought. However, in your response, you can only refer to the content of the question and the audio, and lead to the answer. You must absolutely not assume that you already know the answer. Please provide a detailed and flexible response with high-quality logic in both the caption and reasoning sections. If the reasoning part requires complex logic, you can even propose several different approaches and try them one by one. Here is the original description: *** caption here ***.
440
+
441
+ The question is: *** question here ***.
442
+
443
+ The answer you can refer to : *** answer here ***.
444
+
445
+ Again, don't mention that you have the answer and the description because they are only here to help you to design the chain of thought but should not exist in the real-world scenario, either in the think or response sections.
446
+
447
+ # A.3 Prompt of Stage 3 when Processing Data (Sample from AudioSet)
448
+
449
+ We are data reviewers. Next, you will receive an audio clip, along with its description, questions, answers, and most importantly, the thought process for solving the problems. Please determine and analyze whether all of these elements are completely correct, especially check if there are any hallucinations in the thought process. Return <True> if there are no issues, and <False> if there are errors in the data.
450
+
451
+ Here is the description of the audio: *** caption here ***.
452
+
453
+ Here is the question: *** question here ***.
454
+
455
+ Here is the answer: *** answer here ***.
456
+
457
+ And here is the thought process: *** COT process here ***.
458
+
459
+ Please conduct a thorough judgment and analysis and provide the result in the specified format.
460
+
461
+ # B Synthetic Data Generation Pipeline
462
+
463
+ # B.1 Synthetic Data Introduction
464
+
465
+ Multi-Speaker Dataset: To enhance the model's ability to comprehend complex, multi-turn conversations among multiple speakers, we constructed the Multi-Speaker dataset using text-to-speech (TTS) technology. The dataset generation process consists of three steps: (1) Conversation Text Generation: We utilized commercial models to generate diverse multi-speaker conversation texts covering a wide range of scenarios. (2) Speech Synthesis: Leveraging all available timbres from LibriSpeech [Panayotov et al., 2015] as prompts, we employed the CosyVoice2 [Du et al., 2024] framework to synthesize high-quality speech samples. (3) Dataset Assembly: The synthesized speech samples, featuring distinct timbres, were carefully combined to create a rich and diverse multi-speaker conversation dataset.
466
+
467
+ This approach ensures that the dataset effectively trains the model to navigate intricate speaker interactions in natural conversations.
468
+
469
+ Complex Audio Dataset: To strengthen the model's ability to process intricate audio compositions, we developed the Complex Audio dataset. The audio materials are sourced from AudioSet, AudioCaps, and our internal material library. The dataset consists of two primary types of audio data: (1) Temporally Complex Audio: Short audio clips were sequentially arranged, requiring the model to perform tasks such as sorting or counting based on temporal relationships. We sequentially arranged different short audio clips and required the model to perform sorting or counting tasks. (2) Composite Complex Audio: Multiple long-form audio tracks—such as engine sounds and musical instruments—were blended. The model was then tasked with extracting specific target sounds from the composite audio, enhancing its ability to analyze overlapping audio sources.
470
+
471
+ This dataset ensures that the model is well-equipped to handle real-world audio challenges, such as distinguishing between multiple concurrent sounds and identifying temporal patterns.
472
+
473
+ To better understand what Multi-Speaker dataset and Complex Audio dataset are respectively tasked with, we provide some examples in Sec. B.2, Sec. B.3.
474
+
475
+ # B.2 Sample from Complex Audio Dataset
476
+
477
+ <THINK>
478
+ <PLANNING>
479
+
480
+ The question is about identifying the sounds in the audio and arranging them in the correct order. I will carefully listen to the audio multiple times, noting down the sounds as I recognize them. Then, I will compare the identified sound sequence with the provided answer options to choose the one that matches best.
481
+
482
+ </PLANNING>
483
+
484
+ <CAPTION>Upon initial listening, I can discern sounds that seem like a shattering noise, an animal sound which I think sounds like a cow, a cat sound, and what sounds like some mechanical tools working, perhaps a chainsaw. I shall listen again to clarify the sounds and their order.
485
+
486
+ </CAPTION>
487
+
488
+ <REASONING>1. After relistening, the first sound is definitely glass breaking.
489
+
490
+ 2. The second sound is definitely a cow mooing.
491
+ 3. The third sound is a cat meowing.
492
+ 4. The fourth sound is a chainsaw.
493
+ 5. I compare this sequence (glass break, cow, cat, chainsaw) with the options provided.
494
+ 6. Option (a) matches the order I identified.
495
+ </REASONING>
496
+ <SUMMARY>Based on the detailed listening and analysis, the correct order of sounds in the audio is: glass break, cow, cat, chainsaw. This matches option (a).
497
+ </SUMMARY>
498
+ </THINK>
499
+ <RESPONSE>
500
+ The answer is a.
501
+ </RESPONSE>
502
+
503
+ # B.3 Sample from Multi-Speaker Dataset
504
+
505
+ <THINK>
506
+
507
+ <PLANNING>
508
+
509
+ The user asks me to identify the number of speakers in the audio clip. To do this, I will listen carefully and try to distinguish unique voices. The conversation is between two hikers, I need to confirm this by identifying their individual lines and vocal characteristics. Finally, I will provide the answer based on the number of identified speakers.
510
+
511
+ </PLANNING>
512
+
513
+ <CAPTION>
514
+
515
+ The audio presents a conversation between two individuals hiking.
516
+
517
+ Speaker 1: This person starts by commenting on the intensity of the climb and expresses doubt about continuing without a break. Later, after a reminder about the ranger's warning, this speaker agrees to turn back. The vocal tone suggests exertion and a desire to reach the summit, eventually giving way to agreement. Phrases spoken: "Wow, this climb is intense. I am not sure I can go much further without a serious break," "Turn back. But we're so close to the summit. I really want to make it, you know," "Unpredictable mountain weather, okay. Okay, you are right."
518
+
519
+ Speaker 2: This person responds to the first speaker's concerns, suggesting they turn back due to the darkening sky and diminishing daylight. They emphasize safety and remind the other speaker about the ranger's warning. The vocal tone conveys concern and prioritizes safety. Phrases spoken: "I know right. I am starting to think we should turn back. That sky looks awfully dark, and we're losing daylight quickly," "I hear you, but safety first. Remember what the ranger said about the unpredictable mountain weather," "So, turn back."
520
+
521
+ Based on the clear distinction in vocal tones and the back-and-forth nature of the dialogue, I can confirm that there are two distinct speakers in this audio clip.
522
+
523
+ </CAPTION>
524
+
525
+ <REASONING>
526
+
527
+ Initial Assessment: The audio starts with two distinct voices in a hiking-related dialogue. The voices are clearly distinguishable from the get-go, with one having a deeper timbre and the other a higher-pitched one.
528
+
529
+ Dialogue Analysis: A close look at the content shows a clear difference in the speakers' stances. One is eager to press on to the summit, saying, "We're almost there. Let's keep going." The other is more cautious, citing the darkening sky and fading daylight, stating, "The weather looks bad. We should turn back."
530
+
531
+ Vocal Tone Differentiation: Each speaker's vocal tone stays consistent. The eager speaker has an energetic and determined tone, with a rising pitch when advocating for continuing. The cautious speaker has a measured, worried tone, with a lower pitch when warning of danger.
532
+
533
+ Confirming Speaker Count: A review of the audio shows no extra voices or interruptions.
534
+
535
+ The conversation moves smoothly between the two speakers, indicating only two are present.
536
+
537
+ Final Answer: Based on this analysis, there are two individuals in the conversation.
538
+
539
+ Thus, the answer is B.
540
+
541
+ </REASONING>
542
+
543
+ <SUMMARY>
544
+
545
+ Based on the dialogue and vocal tone analysis, there are two distinct speakers in the audio.
546
+
547
+ The answer is B.
548
+
549
+ </SUMMARY>
550
+
551
+ </THINK>
552
+
553
+ <RESPONSE>
554
+
555
+ Based on the dialogue and distinct vocal tones present in the audio, I have identified two individuals engaged in the conversation. Therefore, the answer is B.
556
+
557
+ </RESPONSE>
558
+
559
+ ![](images/2c7cf6efc88d700632067417e48e5a03b2a49288065af2afae5640b0f1376a7a.jpg)
560
+ C Further Dataset Analysis
561
+ Figure 6: Detailed information of the CoTA dataset: The maximum, minimum, and average number of words in the answers, as well as the average length of the audio.
562
+
563
+ CoTA's reasoning adaptability is also evident in its distribution of token lengths and audio duration across tasks in Figure 6. The dataset accommodates a broad range of reasoning complexities, with token lengths spanning 327 (CoVoST2) to 1,454 (Multi-Speaker), ensuring coverage of both concise and highly intricate reasoning processes. Notably, tasks requiring deep logical inference, such as complex audio, exhibit a well-balanced token distribution $(\max = 423, \text{avg} = 192.96)$ , supporting structured multi-step reasoning without unnecessary redundancy.
564
+
565
+ Further, the dataset's average token lengths (164.48-481.57) align with task difficulty: longer reasoning chains characterize tasks such as sound description (AudioSet: 395.26) and music understanding (MusicBench: 463.89). Meanwhile, CoTA ensures practical generalization by maintaining audio durations between 2.85s and 26.34s, where shorter clips (e.g., MELD: 2.84s) support concise context-dependent reasoning, while extended sequences (Multi-Speaker: 26.34s) enable complex multi-turn inference. This systematic variation in reasoning depth and audio granularity ensures adaptability across diverse tasks, addressing the limitation of one-size-fits-all reasoning chains in existing audio datasets.
566
+
567
+ # D More Case Studies
568
+
569
+ ![](images/9b43c9b6c9f74919ef542d987cdc96e3d1e58021911f412304ed512d5557ae1d.jpg)
570
+ Figure 7: Example of Audio-Reasoner answering music-based question.
571
+
572
+ User
573
+
574
+ Analyze the interplay between the vocal sample, synth elements, and percussion within the track. How do these elements contribute to the overall texture and dynamic progression of the piece, particularly focusing on the shift from the intro to the main rhythm?
575
+
576
+ ![](images/858369a50834266a93f34f3916a01464cba0e42a19f40a500f1166c517b2870b.jpg)
577
+
578
+ ![](images/ba4834b047ef995baaa14b47b6dfb094678968914445d6cc380fd50ae8d1ba44.jpg)
579
+
580
+ Audio Reasoner
581
+
582
+ # <THINK>
583
+
584
+ <PLANNING>The user is asking us to identify potential influences or subgenres within rock music based on the provided instrumental track's instrumentation, chord progression, and overall sonic character. I will start with analyzing the chord progression, instrumentation, and overall sonic character of the music track. Then I will use these analyses to deduce potential influences and subgenres.</PLANNING>
585
+
586
+ <CAPTION>The music features a rock-oriented instrumental piece. The lead melody is carried by an electric guitar, featuring a slight overdrive or distortion. Acoustic rhythm guitars provide harmonic support, panned to the left and right. A slap-style bass guitar and acoustic drums form the rhythm section. The song follows a medium tempo in 4/4 time, with a chord progression of Bb, Ab6, F, and Ab6.</CAPTION>
587
+
588
+ <REASONING> The chord progression, Bb, Ab6, F, Ab6, has blues rock similarities because it includes flat 7 chords. Electric guitar with overdrive or distortion, a driving rhythm section (bass and drums), and acoustic rhythm guitars are characteristics of rock music. Slap bass is used in funk music. Given these considerations, several potential influences and subgenres present themselves. The blues and rock elements of the chord progression could indicate the influence of blues rock and hard rock, which often incorporate blues-based progressions with louder, more aggressive instrumentation. The slap bassline suggests a funk influence, which can be found in funk rock and other rock subgenres that incorporate funk elements. The overall sonic character is most indicative of rock, with potential cross-pollination from blues and funk.</REASONING>
589
+
590
+ <SUMMARY>Based on the chord progression of Bb, Ab6, F, Ab6, the prominent electric guitar solo with overdrive, the acoustic rhythm guitars, the slap-style bass, and the acoustic drums, the music has potential influences or subgenres within rock music. These may include blues rock, hard rock, and funk rock.</SUMMARY>
591
+
592
+ # </THINK>
593
+
594
+ # <RESPONSE>
595
+
596
+ Considering the instrumentation, the chord progression (Bb, Ab6, F, Ab6), and the overall sonic character of the piece, this instrumental track may draw upon influences from several subgenres within rock music. The chord progression has blues and rock similarities, pointing towards blues-rock and hard-rock elements. Moreover, the slap bassline indicates the touch of funk.
597
+
598
+ # </RESPONSE>
599
+
600
+ ![](images/a36cb05d818d194a4e6d2b64a41064f13fcd8e3fecc09d832b319f34886fd1c0.jpg)
601
+ User
602
+
603
+ Considering the sounds present in the audio, describe the likely environment in which the dog is located and detail the types of interactions or activities the dog might be engaged in.
604
+
605
+ ![](images/6656392ff0178a74a688308bf440905dfacb28c80a2e0b33f331b3730e202032.jpg)
606
+
607
+ # <THINK>
608
+
609
+ <PLANNING>The question asks about the dog's environment and activities based on the audio. I need to carefully analyze the audio to identify relevant sounds, infer the setting, and deduce potential interactions.</PLANNING>
610
+
611
+ <CAPTION>The audio contains dog barks of varying intensity and pitch. There are also sounds suggesting movement, such as rustling or the distinct sounds of the dog moving on what sounds to be hard floor surfaces. These movement sounds occur both during and between the barking, with the barks themselves being the main focus of sound. The pitch of the bark changes at different points, from higher to lower and back again.</CAPTION>
612
+
613
+ # <REASONING>
614
+
615
+ 1. **Environment Inference:** The presence of hard floor surface sounds, coupled with the absence of specific outdoor environmental noises (e.g., wind, traffic, birdsong), leans towards an indoor location. The changing pitch and tone of barks show the arousal level is rapidly changing.
616
+ 2. **Activity Inference:** The barking, interspersed with the sound of movement, indicates the dog is active. The varied bark intensity could suggest excitement, playfulness, or even alert behavior in relation to other sounds.
617
+ 3. **Interaction Possibilities:** Based on the sounds alone, it's difficult to pinpoint the exact interaction. It could be playing with a toy, reacting to someone entering the room, or engaging in a game.</REASONING>
618
+
619
+ <SUMMARY>The dog is likely indoors on a hard floor surface. The audio suggests the dog is actively moving and barking, possibly engaged in play or reacting to something in its immediate surroundings. The dog is probably reacting to a stimulus within its environment, resulting in a change in the dog's arousal and tone and pitch of barks.</SUMMARY>
620
+
621
+ # </THINK>
622
+
623
+ # <RESPONSE>
624
+
625
+ Based on the audio, the dog is likely located in an indoor environment with hard floor surface sounds. The varied intensity and sound of the dog's barks, combined with the sound of its movement, indicate the dog is actively moving and vocalizing, and that the arousal level of the dog is actively changing. Given this information, the dog might be engaged in some activity or play, perhaps reacting to a toy or another person within the room.
626
+
627
+ # </RESPONSE>
628
+
629
+ ![](images/aff642ef84ae66fc0319f623312be2998e8ccbd4665d511a6391ba6638e6705a.jpg)
630
+ Figure 8: Example of Audio-Reasoner answering sound-based question.
631
+ Audio Reasoner
632
+
633
+ ![](images/250663b1591e6ae5dbf7df5a0b76757a799f877069a3d178ee425094da48c030.jpg)
634
+ User
635
+
636
+ What did the second person say about the rose bush?
637
+
638
+ (a) They like it.
639
+ (b) It's the bane of their existence.
640
+ (c) It needs more fertilizer.
641
+ (d) It provides good shade.
642
+
643
+ ![](images/18e4c56940d96a5d28d7df8cd5be561661e52ea20dcad7561fe0902a688b15f5.jpg)
644
+
645
+ # <THINK>
646
+
647
+ <PLANNING>The question asks about what the second person said about the rose bush. The audio presents a conversation between two neighbors, Amelia and Ben. I need to identify who the second speaker is and what they said about the rose bush. Then, I need to match what the second speaker said with the given options.</PLANNING>
648
+
649
+ <CAPTION>The audio presents a conversation between Amelia and Ben about the upcoming block party and a troublesome rose bush.
650
+
651
+ **Speaker 1 (Amelia)**: Asks Ben if he's attending the block party and mentions the rose bush causing him trouble. She says "You mentioned weedies, that rose bush still causing you trouble, the one that keeps growing over the fence?". She also mentions she has been scratched by it.
652
+
653
+ **Speaker 2 (Ben)**: Confirms he's attending the block party, entering his chili in the cook-off, and complains about the rose bush. He says "That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble." and suggests chopping it down. Therefore, Ben is the second speaker.</CAPTION>
654
+
655
+ # <REASONING>
656
+
657
+ 1. **Identify the second speaker:** Based on the audio, Ben is the second speaker.
658
+ 2. **Identify what Ben said about the rose bush:** Ben says "That thorny rose bush is the bane of my existence. It's constantly scratching me when I am gardening. So, yes, it's still causing me trouble." and suggests chopping it down.
659
+ 3. **Match Ben's statement with the options:**
660
+ - (a) They like it. - This is incorrect because Ben clearly dislikes the rose bush.
661
+ - (b) It's the bane of their existence. - This aligns perfectly with Ben's statement.
662
+ - (c) It needs more fertilizer. - This is not mentioned in the audio.
663
+ - (d) It provides good shade. - This is also not mentioned in the audio.
664
+
665
+ Therefore, the correct answer is (b).</REASONING>
666
+
667
+ <SUMMARY>Based on the audio, the second speaker, Ben, describes the rose bush as "the bane of my existence" and complains about it scratching him. Therefore, the answer is (b).</SUMMARY>
668
+
669
+ # </THINK>
670
+
671
+ # <RESPONSE>
672
+
673
+ The second person, Ben, explicitly states that the rose bush "is the bane of my existence". Therefore, the answer is B.
674
+
675
+ # </RESPONSE>
676
+
677
+ ![](images/e1fbcf9ad869804d415d3768a1bafdb4f20a43d182c0fe8e203fecf5c5920fde.jpg)
678
+ Figure 9: Example of Audio-Reasoner answering multi-speaker speech-based question.
679
+ Audio
680
+ Reasoner
681
+
682
+ ![](images/bf0107d5c8c65914f99bf9fa3e65a54865c8b756def2b36ec09a1f74a1a04e7b.jpg)
683
+ User
684
+
685
+ Given that you hear buzzing plastic, identify the background sound?
686
+
687
+ (a). A washing machine running
688
+ (b). Rain falling on a window
689
+ (c). A toilet flushing
690
+ (d). Waves crashing on a shore
691
+
692
+ ![](images/f73ca89ffd4040b5bfdf6ba30b94d0ae554820407dfdcdd29189fdf86508fba7.jpg)
693
+
694
+ # <THINK>
695
+
696
+ # <PLANNING>
697
+
698
+ I will carefully and methodically approach the task of identifying the background sound in the audio clip. This will allow me to focus solely on the characteristics of the background noise. Once the background sound is isolated, I will analyze it in detail to determine its key qualities, such as its consistency, rhythm, and texture. Afterward, I will compare these characteristics to the provided answer options.</PLANNING>
699
+
700
+ <CAPTION> Upon my initial listening, I observed that the background sound possesses characteristics that suggest fluid movement. There is a continuous, almost rhythmic, sound with subtle gurgling and sloshing. This could indicate the presence of water or some other flowing liquid. As I continue to listen more carefully, the flow becomes even more evident, suggesting a consistent rush or stream-like sound. </CAPTION>
701
+
702
+ # <REASONING>
703
+
704
+ 1. **Sound Characteristics:** My first observation is that the background sound features a continuous, almost rhythmic quality, with faint gurgling noises. These characteristics often point to a fluid in motion. Upon further listening, I recognize that the sound might be created by a liquid flowing from one place to another.
705
+ 2. **Detailed Listening:** I focus on the quieter sections of the sound, where the fluid dynamics become clearer. The consistent, almost rushing quality suggests the movement of water rather than irregular drops or splashes.
706
+ 3. **Comparison to Options:** With this in mind, I now compare the sound characteristics to the provided answer options. - "A toilet flushing," which often involves a steady rush of water with gurgling and sloshing as the water flows through pipes. This closely matches the consistent flow and gurgling noises I hear in the background.
707
+ 4. **Conclusion:** Based on my analysis, the sound best aligns with the description of a toilet flushing, due to the nature of the consistent, rushing water and subtle gurgling sounds. </REASONING>
708
+
709
+ # </THINK>
710
+
711
+ # <RESPONSE>
712
+
713
+ After listening to the audio carefully, isolating the background sound, and comparing it to the answer options, I concluded that the background sound most closely matches the sound of a toilet flushing. The key factors leading to this conclusion include the continuous rush of water and the gurgling quality, which are typical of a flushing toilet.
714
+
715
+ # </RESPONSE>
716
+
717
+ ![](images/02e9abd6638b2b63d0c5724c13130da9bc69a42b07a4d1726ec78a20919f111b.jpg)
718
+ Figure 10: Example of Audio-Reasoner answering complex-audio-based question.
719
+ Audio
720
+ Reasoner
data/2025/2503_02xxx/2503.02318/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03fba1d70509c7b63f1ef2844f72d6b19e60c2cc3ce91922040f8d5e61021e98
3
+ size 1037863
data/2025/2503_02xxx/2503.02318/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02324/ccb8695f-9db7-4b2d-953a-29631612cfb2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09cdd0838c79a97e8f5924f1cbbff1e3a68b55a03165628afc8f03a2ed1ddd0e
3
+ size 587069
data/2025/2503_02xxx/2503.02324/full.md ADDED
@@ -0,0 +1,892 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PROMPTCOT: Synthesizing Olympiad-level Problems for Mathematical Reasoning in Large Language Models
2
+
3
+ Xueliang Zhao\* Wei Wu\* Jian Guan\* Lingpeng Kong
4
+
5
+ $\star$ The University of Hong Kong $\star$ Ant Group
6
+
7
+ {xlzhao,lpk}@cs.hku.hk
8
+
9
+ {wuwei19850318, jianguanthu}@gmail.com
10
+
11
+ # Abstract
12
+
13
+ The ability of large language models to solve complex mathematical problems has progressed significantly, particularly for tasks requiring advanced reasoning. However, the scarcity of sufficiently challenging problems, particularly at the Olympiad level, hinders further advancements. In this work, we introduce PROMPTcot, a novel approach for automatically generating high-quality Olympiad-level math problems. The proposed method synthesizes complex problems based on mathematical concepts and the rationale behind problem construction, emulating the thought processes of experienced problem designers. We provide a theoretical analysis demonstrating that an optimal rationale should maximize both the likelihood of rationale generation given the associated concepts and the likelihood of problem generation conditioned on both the rationale and the concepts. Our method is evaluated on standard benchmarks including GSM8K, MATH-500, and AIME2024, where it consistently outperforms existing problem generation methods. Furthermore, we demonstrate that PROMPTcot exhibits superior data scalability, consistently maintaining high performance as the dataset size increases, outperforming the baselines. The implementation is available at https://github.com/zhaoxlpu/PromptCoT.
14
+
15
+ # 1 Introduction
16
+
17
+ "In mathematics the art of proposing a question must be held of higher value than solving it."
18
+
19
+ Georg Cantor
20
+
21
+ Recent advancements in large language models (LLMs) have greatly enhanced their capability for
22
+
23
+ solving complex problems through planning and reasoning. Particularly in mathematics, strong reasoning models such as OpenAI o1 (Jaech et al., 2024) and DeepSeek r1 (Guo et al., 2025) have significantly pushed the boundaries of AI from mastering grade school problems (Yang et al., 2024b) to excelling at Olympiad-level challenges (Guo et al., 2025). The remarkable achievements have inspired the community to explore dedicating more computational resources to the inference stage. Consequently, the scaling paradigm of LLMs is shifting from training time to test time (Snell et al., 2024). While powerful reasoning LLMs, such as DeepSeek r1, have been open-sourced, the details of math problem acquisition remain obscured in the published literature (e.g., the technical report of DeepSeek r1), and such data is still kept private. Consequently, studies aiming to reproduce o1-like or r1-like performance have to rely on open-source math datasets (Li et al., 2024b), which are often constrained by scale and difficulty. This leaves an open research question $(RQ)$ : how can we obtain high-quality, sufficiently challenging math problems at scale? We emphasize that $RQ$ is crucial for test-time scaling research, as it serves as a prerequisite for effectively initiating the supervised fine-tuning (SFT) or reinforcement learning (RL) process.
24
+
25
+ We investigate the automated generation of Olympiad-level math problems as a principled approach to addressing $RQ$ . Prior to our work, several projects have curated math datasets, typically involving problem synthesis procedures. Existing synthesis methods can be categorized into three frameworks: (1) direct generation through prompting powerful LLMs (Huang et al., 2024; Tang et al., 2024; Li et al., 2024b; Toshniwal et al., 2024), (2) mining from pretraining corpora (Yue et al., 2023; Li et al., 2024d), and (3) problem evolution, either through simple-to-simple rephrasing (Yu et al., 2023) or difficulty-based progression (Xu et al.,
26
+
27
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">Qwen2.5-Math-72B-Instruct Accuracy (↓)</td><td rowspan="2">DeepSeek-R1-Distill-Qwen-7B Avg. Reasoning Tokens (↑)</td><td colspan="2">Micro Avg. Accuracy on MATH-500 and AIME2024</td></tr><tr><td>Fine-tuned Qwen2.5-Math-7B</td><td>Δ (↑)</td></tr><tr><td>AIME2024</td><td>30.0</td><td>4,159</td><td>-</td><td>-</td></tr><tr><td>PROMPTcot (ours)</td><td>48.9</td><td>6,502</td><td>80.8</td><td>+1.2</td></tr><tr><td>OpenMathInstruct (Toshniwal et al., 2024)</td><td>63.3</td><td>1,578</td><td>75.8</td><td>-3.8</td></tr><tr><td>NuminaMath (Li et al., 2024b)</td><td>60.9</td><td>3,039</td><td>76.6</td><td>-3.0</td></tr><tr><td>Evol-Instruct (Luo et al., 2023)</td><td>65.9</td><td>1,346</td><td>74.0</td><td>-5.6</td></tr><tr><td>KPDDS (Huang et al., 2024)</td><td>73.2</td><td>1,225</td><td>72.3</td><td>-7.3</td></tr></table>
28
+
29
+ Table 1: Difficulty and efficacy evaluation for different mathematical datasets. Accuracy: Performance of Qwen2.5-Math-72B-Instruct on the problems in different datasets. Note that we directly apply the model to solve the problems without any tuning, so the metric reflects difficulty of the problems for Qwen2.5-Math-72B-Instruct (↓: lower accuracy indicates higher difficulty). Avg. Reasoning Tokens: Average number of tokens in reasoning processes generated by DeepSeek-R1-Distill-Qwen-7B when processing the problems in different datasets (↑: larger numbers means DeepSeek-R1-Distill-Qwen-7B needs more tokens to complete reasoning, suggesting greater problem difficulty). Micro Avg. Accuracy on MATH-500 and AIME2024: Performance of Qwen2.5-Math-7B after fine-tuning on different datasets. Accuracy is computed as a weighted average over MATH-500 and AIME 2024. Note that fine-tuning is conducted using both the problems and their corresponding solutions. Further details are provided in Section 3. △: Difference in performance between Qwen2.5-Math-7B and Qwen2.5-Math-7B-Instruct (↑: larger margins suggests bigger contributions from the corresponding datasets). Note that no tuning is performed on Qwen2.5-Math-7B-Instruct, so its Micro Avg. Accuracy remains fixed at 79.6.
30
+
31
+ 2023; Luo et al., 2023). While these efforts progressively contribute to LLM reasoning, the problems provided by these methods now are not challenging enough for state-of-the-art models, making them less effective in further advancements. Table 1 provides a detailed analysis of problem difficulty in typical published works and open-source datasets, along with their efficacy in enhancing state-of-the-art LLMs. From this, we observe clear gaps in terms of difficulty between the problems in existing work and those in AIME, as well as the limited utility of these problems in further improving LLM performance.
32
+
33
+ The primary challenge in synthesizing Olympiad-level math problems lies in their scarcity in existing corpora. As a result, the complex reasoning patterns required for such problems are undertrained, making it difficult for LLMs to assign sufficient probability to them during inference. More broadly, what we aim to explore is by nature a low-resource generation problem, which is prevalent across various applications yet inherently contrasts with the fundamental working mechanisms of LLMs. We focus on the mathematical domain, but our method can be easily adapted to other domains. Specifically, we propose PROMPTCOt, a novel problem generation method that synthesizes a complex math problem based on given math concepts (e.g., "Prime Numbers", c.f. Figure 1) and a rationale that emulates a veteran teacher's thought process when designing math problems for students. The idea is inspired by the success of LLMs in "solving" difficult
34
+
35
+ problems, where detailed reasoning procedures (e.g., chain-of-thoughts (Wei et al., 2022a)) have proven particularly effective. Through theoretical analysis, we show that an optimal rationale should simultaneously maximize the likelihood of rationale generation given the concepts and the likelihood of problem generation conditioned on both the rationale and the concepts. Based on this analysis, we implement PROMPTCoT by first querying an LLM to generate rationales for prepared Olympiad-level math problems and their associated concepts, and then fine-tuning an LLM as a problem generation model using a set of concept-rationale-problem triples.
36
+
37
+ We conduct extensive experiments on multiple benchmarks focused on mathematical reasoning, including GSM8K (Cobbe et al., 2021), MATH-500 (Lightman et al., 2023), and AIME2024 (AIME-2024). Evaluation results demonstrate that, compared to a range of existing generation methods and datasets, PROMPTCoT leads to more significant improvements on state-of-the-art LLMs after distillation in both short-CoT and long-CoT settings, achieving $0.4\% - 4.8\%$ absolute gains on MATH-500 and $6.7\% - 20\%$ absolute gains on AIME2024. An extended experiment further shows that PROMPTCoT exhibits remarkable advantages over the baseline method as the number of problems gradually increases, highlighting its superior data scalability.
38
+
39
+ Our contributions are three-fold: (1) We propose synthesizing Olympiad-level math problems to enhance LLM reasoning. While previous re
40
+
41
+ ![](images/4b8dae9c8a073f5f2d8a9395e0e8c8918cefb3fc1d1f3a4fedafce05ba53e6d2.jpg)
42
+ Figure 1: Overview of the proposed method. Left: Given an Olympiad problem and its foundational concepts (extracted using an LLM), the goal is to identify rationales that act as “amplifiers” to enhance the likelihood of generating the Olympiad problem (i.e., $p(x \mid z, \mathbf{c})$ ) while ensuring that these rationales can be easily derived from the foundational concepts (i.e., $p(z \mid \mathbf{c})$ ). Right: Once an appropriate rationale is found, we use the (foundational concepts, rationale, Olympiad problem) triple to train a model capable of predicting both the rationale and the Olympiad problem from any given set of concepts. The model can be further optimized through rejection sampling, where the quality of generated outputs is evaluated and used for subsequent model training.
43
+
44
+ search (Snell et al., 2024) has identified problem difficulty as a key factor in improving LLMs' reasoning capabilities, to the best of our knowledge, we are the first to formally pose difficult problem generation as a research question (i.e., $RQ$ ); (2) We propose PROMPTCoT as a principled approach to generating Olympiad-level math problems. To the best of our knowledge, we are the first to apply the chain-of-thought paradigm to the task of problem generation; and (3) We conduct extensive experiments to evaluate the efficacy of PROMPTCoT. Notably, PROMPTCoT is the only method that enables Qwen2.5-Math base models to surpass their Instruct versions. Furthermore, a 7B model distilled using the problems generated by PROMPTCoT is able to achieve performance comparable to state-of-the-art 32B models on mathematical reasoning tasks.
45
+
46
+ # 2 Method
47
+
48
+ Figure 1 provides an overview of PROMPTCoT. In summary, our method comprises three key steps: (1) Concept Extraction: Foundational concepts are extracted from seed prompts collected from Olympiad math sources ( $\S 2.1$ ); (2) Rationale Generation: A rationale is inferred for each problem based on its associated concepts by maximizing both the probability of the rationale given the concepts and the probability of the problem given both the rationale and the concepts ( $\S 2.2$ ); and (3) Problem Generation Model Training: Concept-rationale-problem triples are used to fine-tune an LLM as a problem generation model, followed by a rejection sampling fine-tuning stage for further
49
+
50
+ self-improvement, ensuring the quality of generated problems (§2.3).
51
+
52
+ # 2.1 Concept Extraction
53
+
54
+ We collect a large set of seed prompts from the AoPS<sup>1</sup> platform which contains challenging Olympiad-level math problems. Problems overlapping with common test benchmarks are filtered out to prevent data leakage. For each seed prompt, domain-specific concepts are extracted using a large language model. The model is provided with a query instruction (see Appendix A for details) that directs it to identify the salient concepts associated with the problem. While tailored to mathematics in our study, these concepts serve as a concise abstraction of the underlying reasoning in problem design and can be readily adapted to other domains by replacing the seed prompts with those relevant to the target domain.
55
+
56
+ # 2.2 Rationale Generation
57
+
58
+ For effective problem generation, the underlying rationale should (i) be naturally derived from a set of foundational concepts and (ii) increase the likelihood of producing a high-quality problem. To capture this idea, let us denote the set of extracted $K$ concepts by
59
+
60
+ $$
61
+ \mathbf {c} = \left\{c _ {1}, c _ {2}, \dots , c _ {K} \right\}.
62
+ $$
63
+
64
+ We introduce a latent variable $z$ that represents the rationale connecting these concepts to the original
65
+
66
+ # Algorithm 1 Rationale-Guided Problem Generation
67
+
68
+ Require: A set of seed prompts $\{x_1, x_2, \ldots, x_n\}$ and an LLM for concept extraction and rationale generation.
69
+
70
+ 1: Initialize training set $T\gets \emptyset$
71
+ 2: for all seed prompt $x \in \{x_1, x_2, \ldots, x_n\}$ do
72
+ 3: Concept Extraction: Query the LLM (using the instruction in Appendix A) to extract a set of foundational concepts c.
73
+ 4: Rationale Generation: Query the LLM (using the instruction in Appendix B) with $x$ and $\mathbf{c}$ to generate a rationale $z$ .
74
+ 5: Add the triplet $\langle \mathbf{c},z,x\rangle$ to $T$
75
+ 6: Model Training: Train a problem generation model on $T$ with MLE and rejection sampling.
76
+ 7: Inference: For any given set of foundational concepts $\mathbf{c}'$ , use the trained model to generate a rationale $z'$ and a problem $x'$ , and aggregate these problems into the set $\{x_1', x_2', \ldots, x_m'\}$ .
77
+ Output: A set of high-quality and challenging problems $\{x_1', x_2', \ldots, x_m'\}$ .
78
+
79
+ problem $x$ . The generation process is then formulated as
80
+
81
+ $$
82
+ p (x \mid \mathbf {c}) = \sum_ {z} p (x, z \mid \mathbf {c}) = \sum_ {z} p (x \mid z, \mathbf {c}) p (z \mid \mathbf {c}).
83
+ $$
84
+
85
+ Ideally, we aim to maximize $p(x \mid \mathbf{c})$ ; however, marginalizing over the latent variable $z$ is intractable. Therefore, we introduce a variational distribution $q(z \mid \mathbf{c}, x)$ to approximate the true posterior $p(z \mid \mathbf{c}, x)$ and derive an evidence lower bound (ELBO) via Jensen's inequality:
86
+
87
+ $$
88
+ \log p (x \mid \mathbf {c}) \geq \mathbb {E} _ {q (z | \mathbf {c}, x)} \left[ \log \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)} \right].
89
+ $$
90
+
91
+ Lemma 1 (Optimal Variational Distribution). The optimal variational distribution $q^{\star}(z \mid \mathbf{c}, x)$ that maximizes the ELBO satisfies
92
+
93
+ $$
94
+ q ^ {\star} (z \mid \mathbf {c}, x) \propto p (x \mid z, \mathbf {c}) p (z \mid \mathbf {c}).
95
+ $$
96
+
97
+ The proof is provided in Appendix D. This result implies that the optimal distribution over the latent rationale is governed by two key factors: the extent to which the rationale is naturally derived from the set of foundational concepts $\mathbf{c}$ (as indicated by $p(z\mid \mathbf{c})$ ), and the degree to which this rationale increases the likelihood of generating a high-quality problem (as indicated by $p(x\mid z,\mathbf{c})$ ). In our framework, these insights ensure that the inferred rationale not only reflects the core input information but also contributes to the production of the problems. In practice, $z$ is obtained by querying a large language model with a dedicated instruction (see Appendix B).
98
+
99
+ # 2.3 Problem Generation Model
100
+
101
+ Through concept extraction and rationale generation, we construct a dataset $\mathcal{D} = \{(\mathbf{c},z,x)\}$ from the seed prompts, where $x$ represents a problem, $\mathbf{c}$ denotes the foundational concepts relevant to $x$ , and $z$ captures the underlying thought process
102
+
103
+ behind $x$ . An LLM is then fine-tuned on $\mathcal{D}$ as a problem generation model, enabling it to jointly synthesize a rationale and the corresponding problem given a set of foundational concepts. The training objective is defined by Maximum Likelihood Estimation (MLE).
104
+
105
+ To further ensure that synthesized problems are of high quality, we implement an iterative rejection sampling mechanism using the fine-tuned problem generation model from the previous step. Specifically, for each concept set $\mathbf{c} \in \mathcal{D}$ , the model generates candidate rationale-problem pairs $(\tilde{z}, \tilde{x})$ . These candidates undergo rigorous quality assessment through two independent LLMs serving as evaluators, which assign ratings based on predetermined criteria (detailed evaluator instructions are provided in Appendix C). Only candidate pairs receiving unanimous "perfect" ratings from both evaluators are retained for subsequent model optimization, thereby ensuring that the fine-tuning process exclusively incorporates exemplars of the highest quality. Algorithm 1 presents a formal summary of the rationale-guided problem generation procedure.
106
+
107
+ At inference time, the fine-tuned problem generation model is supplied with an arbitrary set of foundational concepts sampled uniformly from the entire training corpus. It then generates rationale-problem pairs $(z', x')$ that adhere to the rigorous quality standards established during training. This approach enables the generation of a large corpus of high-quality and challenging math problems that are robust and well-grounded in the underlying concepts.
108
+
109
+ When using the synthesized problems for LLM reasoning, we leverage a powerful teacher model (details are presented in Section 3) to generate detailed, step-by-step solutions for each problem, thereby constructing a comprehensive training dataset $\mathcal{D}_{\mathrm{train}} = \{(x', s')\}$ , where $s'$ denotes the solution corresponding to problem $x'$ , for training
110
+
111
+ downstream mathematical reasoning models.
112
+
113
+ # 3 Experiments
114
+
115
+ # 3.1 Datasets and Evaluation Metrics
116
+
117
+ We employ three standard benchmarks focused on mathematical reasoning. Specifically, we use the following datasets: (1) GSM8K (Cobbe et al., 2021) is a dataset consisting of grade-school level math word problems that require logical reasoning. It tests a model's ability to solve elementary-level math problems; (2) MATH-500 (Lightman et al., 2023) is a dataset containing high school-level math problems. It serves to assess a model's ability to handle more advanced mathematical reasoning; and (3) AIME 2024 (AIME-2024) is a benchmark that includes particularly challenging math problems from the American Invitational Mathematics Examination (AIME), designed to assess advanced problem-solving skills. These problems are significantly more difficult than typical high school-level math problems, requiring advanced reasoning and problem-solving strategies. We use exact match accuracy as the primary metric for evaluating the performance of our method in math problem-solving tasks. Specifically, for these tasks, accuracy is determined by comparing the predicted final answer, enclosed by \boxed{ } with the ground-truth answer.
118
+
119
+ # 3.2 Baseline Methods
120
+
121
+ We evaluate the proposed method in two settings: short-CoT and long-CoT. Short-CoT refers to the vanilla CoT (Wei et al., 2022a), where reasoning tokens are generated before deriving the final answer. Long-CoT (OpenAI, 2024b; Guo et al., 2025), on the other hand, requires the model to generate a deep reasoning process, which may include self-reflections, prior to producing the final CoT sequence. For the short-CoT setting, we compare our method with the following problem generation baselines: (1) Evol-Instruct: This method (Luo et al., 2023) aims to enhance the quality of instruction data by improving both its complexity and diversity, thus facilitating the generation of more varied and challenging problems; (2) KPDDS: A data synthesis framework (Huang et al., 2024) that generates question-answer pairs by leveraging key concepts and exemplar practices derived from authentic data sources; (3) OpenMathInstruct: This method (Toshniwal et al., 2024) utilizes few-shot learning to prompt an LLM to create new math problems based on existing exam
122
+
123
+ ples, without explicit instructions for adjusting difficulty or introducing new constraints; and (4) NuminaMath: This approach (Li et al., 2024b) uses an LLM to generate novel math questions starting from a reference problem. Additionally, we compare with an open-source model Qwen2.5-Math-Instruct, which is known for its state-of-the-art performance without relying on long-CoT reasoning. For methods that have not released the generated problems, specifically Evol-Instruct and KPDDS, we follow their papers and use Llama3.1-70B-Instruct (Dubey et al., 2024) as the LLM to generate the problems, ensuring that the number of generated problems is consistent with ours. For NuminaMath $^2$ and OpenMathInstruct $^3$ , we directly use the published problem sets. For all problem generation baselines, we use Qwen2.5-Math-72B-Instruct as the teacher model to generate solutions, ensuring a fair comparison across methods. For the long-CoT setting, we compare the proposed method with: DeepSeek-R1-Distill-Qwen, a leading model (Guo et al., 2025) that incorporates long-CoT reasoning (OpenAI, 2024b), allowing for more thorough and comprehensive problem-solving processes that generate solutions with deeper reasoning.
124
+
125
+ # 3.3 Implementation Details
126
+
127
+ We implement PROMPTCoT in two configurations: (1) PROMPTCoT-Qwen, distilled from Qwen2.5-Math-72B-Instruct (Yang et al., 2024b), which is capable of generating short-CoT (vanilla Chain-of-Thought) reasoning, and (2) PROMPTCoT-DS, distilled from DeepSeek-R1-Distill-Qwen-7B (Guo et al., 2025) (i.e., the teacher model), which is designed to generate long-CoT reasoning (OpenAI, 2024b). For PROMPTCoT-Qwen, we generate a total of $m = 905{,}459$ problems, while for PROMPTCoT-DS, we generate $m = 114{,}763$ problems<sup>4</sup>. For concept extraction, we construct a dataset consisting of 6,365 seed prompts. We use Llama-3.1-70B-Instruct to extract the relevant concepts, with the number of concepts per problem set to $k = 5$ . In the rationale generation phase, we leverage Llama-3.1-70B-Instruct (Dubey et al., 2024), Qwen2.5-72B-Instruct (Yang et al., 2024a), and Qwen2.5-32B-Instruct to diversify the
128
+
129
+ <table><tr><td>Models</td><td>Base</td><td>GSM8K</td><td>MATH-500</td><td>AIME2024</td></tr><tr><td colspan="5">short-CoT</td></tr><tr><td>Qwen2.5-Math-1.5B-Instruct</td><td>-</td><td>84.8</td><td>75.8</td><td>10.0</td></tr><tr><td>Openmathinstruct-1.5B</td><td>Qwen2.5-Math-1.5B</td><td>84.9</td><td>63.8</td><td>13.3</td></tr><tr><td>NuminaMath-1.5B</td><td>Qwen2.5-Math-1.5B</td><td>85.1</td><td>69.0</td><td>16.7</td></tr><tr><td>Evol-Instruct-1.5B</td><td>Qwen2.5-Math-1.5B</td><td>84.6</td><td>67.4</td><td>10.0</td></tr><tr><td>KPDDS-1.5B</td><td>Qwen2.5-Math-1.5B</td><td>83.4</td><td>64.4</td><td>6.7</td></tr><tr><td>PROMPTcot-Qwen-1.5B</td><td>Qwen2.5-Math-1.5B</td><td>87.1</td><td>78.4</td><td>26.7</td></tr><tr><td colspan="5">long-CoT</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-1.5B†</td><td>-</td><td>85.1</td><td>80.2</td><td>23.3</td></tr><tr><td>PROMPTcot-DS-1.5B</td><td>DeepSeek-R1-Distill-Qwen-1.5B</td><td>86.1</td><td>85.0</td><td>43.3</td></tr></table>
130
+
131
+ Table 2: Evaluation results across three mathematical reasoning benchmarks for models with 1.5B parameters. Bold numbers indicate the highest performance in each respective setting. $\dagger$ indicates results reproduced using our prompt.
132
+
133
+ <table><tr><td>Models</td><td>Base</td><td>GSM8K</td><td>MATH-500</td><td>AIME2024</td></tr><tr><td colspan="5">short-CoT</td></tr><tr><td>Qwen2.5-Math-7B-Instruct</td><td>-</td><td>95.2</td><td>83.6</td><td>13.3</td></tr><tr><td>Openmathinstruct-7B</td><td>Qwen2.5-Math-7B</td><td>92.0</td><td>79.6</td><td>10.0</td></tr><tr><td>NuminaMath-7B</td><td>Qwen2.5-Math-7B</td><td>92.9</td><td>81.8</td><td>20.0</td></tr><tr><td>Evol-Instruct-7B</td><td>Qwen2.5-Math-7B</td><td>88.5</td><td>77.4</td><td>16.7</td></tr><tr><td>KPDDS-7B</td><td>Qwen2.5-Math-7B</td><td>89.9</td><td>76.0</td><td>10.0</td></tr><tr><td>PROMPTcot-Qwen-7B</td><td>Qwen2.5-Math-7B</td><td>93.3</td><td>84.0</td><td>26.7</td></tr><tr><td colspan="5">long-CoT</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-7B†</td><td>-</td><td>91.7</td><td>91.6</td><td>43.3</td></tr><tr><td>PROMPTcot-DS-7B</td><td>DeepSeek-R1-Distill-Qwen-7B</td><td>92.6</td><td>93.0</td><td>60.0</td></tr></table>
134
+
135
+ dataset, resulting in a total of 19,095 conceptualization triples. We initialize the problem generation model with Llama-3.1-8B. During the MLE training stage, we employ a learning rate of $2 \times 10^{-5}$ and a batch size of 64. In the rejection sampling phase, we perform three rounds of evaluation, using Llama-3.1-70B-Instruct and Qwen2.5-72B-Instruct as evaluators. For the rejection sampling process, we continue to use the same learning rate of $2 \times 10^{-5}$ and batch size of 64 as in the MLE training stage. All experiments are conducted on 8×A100 80GB machines.
136
+
137
+ # 3.4 Main Results
138
+
139
+ The results of our experiments, presented in Tables 2 and 3, reveal the following key insights: (1) Our method achieves state-of-the-art performance across multiple benchmarks, outperforming the baselines on both short-CoT and long-CoT settings. This highlights the efficacy of our rationale-driven approach in generating high-quality problems; (2) As the problem difficulty increases from GSM8K to MATH-500 and AIME2024, our method's advantage becomes more pronounced. This is par
140
+
141
+ ticularly evident on AIME2024, which demands more advanced reasoning and problem construction. Our approach excels at generating Olympiad-level problems, capturing the more complex reasoning needed for such tasks; and (3) Our method further enhances the performance of long-CoT models like DeepSeek-R1-Distill-Qwen. The generation of Olympiad-level problems, with their higher complexity, taps into the full potential of long-CoT reasoning, enabling deeper and more effective reasoning processes.
142
+
143
+ Table 3: Evaluation results across three mathematical reasoning benchmarks for models with 7B parameters. Bold numbers indicate the highest performance in each respective setting. $\dagger$ indicates results reproduced using our prompt.
144
+
145
+ <table><tr><td>Models</td><td>GSM8K</td><td>MATH-500</td><td>AIME2024</td></tr><tr><td>PROMPTCoT (full)</td><td>87.1</td><td>78.4</td><td>26.7</td></tr><tr><td>- rationale</td><td>82.3</td><td>67.0</td><td>10.0</td></tr><tr><td>- optimal</td><td>86.3</td><td>72.6</td><td>16.7</td></tr><tr><td>- rejection sampling</td><td>85.9</td><td>75.2</td><td>20.0</td></tr></table>
146
+
147
+ Table 4: Ablation study results for 1.5B parameter models. Bold numbers indicate the highest performance.
148
+
149
+ # 4 Discussions
150
+
151
+ In addition to the extensive evaluation across multiple benchmarks, we seek to further understand the
152
+
153
+ underlying mechanisms of PROMPTCoT. Specifically, we explore the following research questions: (1) RQ1: How do the different components of PROMPTCoT contribute to its performance? (2) RQ2: How does the difficulty of the problems generated by PROMPTCoT compare to those from typical published works and open-source datasets? (3) RQ3: How does PROMPTCoT compare to state-of-the-art models with larger parameter sizes? (4) RQ4: What are the scaling properties of the problems generated by PROMPTCoT?
154
+
155
+ # 4.1 Ablation Study for RQ1
156
+
157
+ We perform an ablation study using 1.5B parameter models in the short-CoT setting and evaluate three variants of PROMPTCoT: exclusion of the rationale, denoted as “- rationale”; exclusion of the optimality condition for the rationale, referred to as “- optimal”; and exclusion of the rejection sampling process, represented as “- rejection sampling”.
158
+
159
+ The results in Table 4 show that the full version of PROMPTCoT consistently outperforms all variants, emphasizing the importance of each component. Excluding the rationale leads to the largest performance drop, indicating the crucial role of the rationale in guiding problem generation. The "optimal" variant also experiences a performance decrease, albeit smaller, highlighting the significance of the rationale's optimal construction. The exclusion of rejection sampling has the least impact, suggesting that while it helps align the problem generation model with predefined quality criteria, the difficulty of problems plays a more important role in reasoning tasks.
160
+
161
+ # 4.2 Analysis of Problem Difficulty for RQ2
162
+
163
+ While Table 1 in Section 1 has presented analysis on problem difficulty, we provide more details here. We assess the performance of Qwen2.5-Math-Instruct when tested on problem sets produced by different problem generation methods, including PROMPTCOT and other baselines. Specifically, we calculate the accuracy of Qwen2.5-Math-Instruct on these problem sets to compare their difficulty. We use the accuracy on AIME2024 as a reference. For open-source datasets, including NuminaMath and OpenMathInstruct, we use the provided answers in the datasets as the ground truth to calculate
164
+
165
+ accuracy. For other methods, including KPDDS, Evol-Instruct, and PROMPTCOT, we use a strong reasoning model, DeepSeek-R1-Distill-Qwen-7B, to label the ground truth answers, applying self-consistency (Wang et al., 2022) with 8 rollouts to ensure answer validity. The results in Figure 2 indicate that the difficulty of the problems generated by PROMPTCOT is closer to AIME, outperforming the other methods by a significant margin.
166
+
167
+ Additionally, we compare the reasoning tokens required for different problems. To compute the number of reasoning tokens, we use the DeepSeek-R1-Distill-Qwen-7B model. In this experiment, reasoning tokens include both the portion between "<think>" and "</think>" tags, as well as the final CoT. The results in Table 1 demonstrate that PROMPTCoT-generated problems require a considerably higher number of reasoning tokens compared to the other methods, reflecting the increased difficulty of the problems generated by PROMPTCoT.
168
+
169
+ <table><tr><td>Models</td><td>MATH-500</td><td>AIME2024</td></tr><tr><td>QwQ-32B</td><td>90.6</td><td>50.0</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-32B†</td><td>90.4</td><td>63.3</td></tr><tr><td>S1-32B</td><td>93.0</td><td>56.7</td></tr><tr><td>PROMPTcot-DS-7B</td><td>93.0</td><td>60.0</td></tr></table>
170
+
171
+ Table 5: Performance comparison with state-of-the-art models having 32B parameters on MATH-500 and AIME2024. $\dagger$ indicates results reproduced using our prompt.
172
+
173
+ # 4.3 Performance Comparison for RQ3
174
+
175
+ We further compare PROMPTCoT-DS-7B with state-of-the-art reasoning models that have significantly larger parameter sizes to evaluate whether our model can match the reasoning capabilities typically associated with larger models. Specifically, we compare PROMPTCoT-DS-7B with the models QwQ (Team, 2024), DeepSeek-R1-Distill-Qwen-32B (Guo et al., 2025), and S1-32B (Muennighoff et al., 2025), all of which leverage long-CoT to solve problems. The results presented in Table 5 show that PROMPTCoT-DS-7B achieves performance comparable to the state-of-the-art 32B parameter models. This suggests that our approach, through synthesizing Olympiad-level problems, enables smaller models to perform at a level previously associated with much larger models, highlighting the effectiveness of our method in enhancing reasoning capabilities without requiring an in
176
+
177
+ ![](images/35023d8dfedcf299e1028d5e780fb8bae58986c0f2ea35721b5e181ec9f76869.jpg)
178
+ Figure 2: Performance comparison of models with varying capabilities, using Qwen2.5-Math series models, across problems from diverse sources.
179
+
180
+ ![](images/0aa136e57370be5426cb7619a626c6bb04614abaa879277083cd639e4df7ff4f.jpg)
181
+
182
+ ![](images/13fe98cc1817e5239b842fc9c3f92047938b99157cc72ec14844cb086e5bb1b2.jpg)
183
+
184
+ crease in parameter size.
185
+
186
+ ![](images/a5a8f6f87278b4ebfc4e37e7c0559b80ba985961d3761dbce132eacc940bd80c.jpg)
187
+ Figure 3: Comparison of Scaling Performance between PROMPTCOT and OpenMathInstruct across Varying Problem Set Sizes
188
+
189
+ # 4.4 Scaling Properties for RQ4
190
+
191
+ We evaluate the scaling property of PROMPTCoT by comparing it with OpenMathInstruct across varying problem sizes, ranging from 200K to 800K problems. To conduct the evaluation, we use Qwen2.5-Math-1.5B as the base model and Qwen2.5-Math-72B-Instruct to generate solutions for the problems. We select MATH-500 as the evaluation dataset due to its balanced difficulty and appropriate scale. The results shown in Figure 3 demonstrate that PROMPTCoT consistently outperforms OpenMathInstruct across all problem sizes. PROMPTCoT exhibits significant scalability, maintaining superior performance as the dataset size increases, while OpenMathInstruct's performance gradually plateaus with larger problem sets.
192
+
193
+ # 5 Related Work
194
+
195
+ Mathematical Reasoning with LLMs. Recent advances in LLMs' mathematical capabilities have been driven by three key directions: data enrichment, methodological innovation, and test-time scaling. While extensive mathematical pre-training corpora (Paster et al., 2023; Wang et al., 2024;
196
+
197
+ Azerbayev et al., 2024; Shao et al., 2024) and curated fine-tuning datasets (Yue et al., 2023, 2024; Li et al., 2024a; Toshniwal et al., 2024; Wen et al., 2025; Ye et al., 2025) have enhanced model performance, concerns persist about their true reasoning capabilities versus pattern memorization (Li et al., 2024c; Mirzadeh et al., 2024; Zhang et al., 2024; Kambhampati, 2024). To address these limitations, researchers have explored in-context learning (Zhao et al., 2024c), advanced prompting techniques (Wei et al., 2022b; Press et al., 2023; Imani et al., 2023; Zhao et al., 2024b), specialized training approaches (Zhao et al., 2024a; Mitra et al., 2024; OpenAI, 2024a), and tool integration (Gao et al., 2023; Schick et al., 2024). Notable breakthroughs include DeepSeek-R1's (Guo et al., 2025) rule-based reinforcement learning and test-time scaling advances (Wu et al., 2024; Guan et al., 2025; Muennighoff et al., 2025). However, generating Olympiad-level problems remains challenging.
198
+
199
+ Prompt Synthesis. Current prompt synthesis approaches fall into three categories: LLM-driven generation, data-derived synthesis, and heuristic composition. LLM-driven methods use advanced models to generate prompts from seeds (Wang et al., 2023; Li et al., 2024b), sometimes incorporating personas (Ge et al., 2024) or concepts (Huang et al., 2024; Tang et al., 2024). Xu et al. (2024) introduced auto-regressive template completion for query generation. Data-derived approaches extract prompts from existing sources, using techniques like instruction backtranslation (Li et al., 2024d; Zhao et al., 2024d) and web corpus refinement (Yue et al., 2024). Heuristic composition methods systematically combine simpler elements, as demonstrated in visual reasoning (Cheng et al., 2024) and mathematical problem construction (Luo et al., 2023; Yu et al., 2024). While these methods show promise, they lack comprehensive rationales in syn
200
+
201
+ thesis, limiting their ability to generate sophisticated problems. Our approach addresses this by explicitly incorporating rationales during synthesis.
202
+
203
+ # 6 Conclusion
204
+
205
+ In this paper, we introduce PROMPTCoT, a novel approach for synthesizing Olympiad-level math problems. Our method integrates rationale generation to guide problem synthesis, inspired by the success of LLMs in reasoning tasks such as chain-of-thought. By leveraging this rationale and underlying mathematical concepts, we generate complex, high-quality problems that are well-suited to improve the reasoning capabilities of LLMs. Through extensive experimentation across multiple mathematical reasoning benchmarks, including GSM8K, MATH-500, and AIME2024, we show that PROMPTCoT outperforms existing problem generation methods.
206
+
207
+ # Ethical Considerations
208
+
209
+ In accordance with the established Code of Ethics, this study utilizes only publicly available data and information, ensuring that no private or confidential resources are involved.
210
+
211
+ # Limitations
212
+
213
+ While PROMPTCoT makes significant strides in the generation of challenging mathematical problems, there are several limitations that should be addressed in future work to fully unlock its potential:
214
+
215
+ (1) The problem generation model used in PROMPTCoT is based on Llama-3.1-8B, which, due to its relatively smaller scale, may still struggle to generate exceptionally challenging problems, such as those seen in the International Mathematical Olympiad (IMO). Although it performs well for tasks within the scope of current benchmarks, its capacity limits the generation of problems requiring more advanced reasoning and complexity. In future work, we plan to explore the use of larger pre-trained models to improve the quality and difficulty of the generated problems, enabling the synthesis of more sophisticated problem sets.
216
+ (2) While PROMPTCoT has successfully scaled problem generation to 905K problems, this scale remains modest compared to the vast amounts of data used for LLM pretraining. Expanding the problem generation capacity to larger scales is crucial for
217
+
218
+ generating more diverse and challenging problem sets. Future research should focus on extending the scalability of problem generation to better align with the scale of current LLM training data, contributing to the development of more robust and capable models.
219
+
220
+ # References
221
+
222
+ AIME-2024. https://huggingface.co/datasets/ai-mo/aimo-validation-aime.
223
+ Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen McAleer, Albert Q. Jiang, Jia Deng, Stella Biderman, and Sean Welleck. 2024. Llemma: An open language model for mathematics. Preprint, arXiv:2310.10631.
224
+ Chuanqi Cheng, Jian Guan, Wei Wu, and Rui Yan. 2024. From the least to the most: Building a plug-and-play visual reasoner via data synthesis. Preprint, arXiv:2406.19934.
225
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168.
226
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
227
+ Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023. Pal: Program-aided language models. In International Conference on Machine Learning, pages 10764–10799. PMLR.
228
+ Tao Ge, Xin Chan, Xiaoyang Wang, Dian Yu, Haitao Mi, and Dong Yu. 2024. Scaling synthetic data creation with 1,000,000,000 personas. Preprint, arXiv:2406.20094.
229
+ Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519.
230
+ Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.
231
+ Yiming Huang, Xiao Liu, Yeyun Gong, Zhibin Gou, Yelong Shen, Nan Duan, and Weizhu Chen. 2024. Key-point-driven data synthesis with its enhancement on mathematical reasoning. arXiv preprint arXiv:2403.02333.
232
+
233
+ Shima Imani, Liang Du, and Harsh Shrivastava. 2023. Mathprompter: Mathematical reasoning using large language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 5: Industry Track), pages 37-42.
234
+ Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720.
235
+ Subbarao Kambhampati. 2024. Can large language models reason and plan? Annals of the New York Academy of Sciences, 1534(1):15-18.
236
+ Chen Li, Weiqi Wang, Jingcheng Hu, Yixuan Wei, Nanning Zheng, Han Hu, Zheng Zhang, and Houwen Peng. 2024a. Common 7b language models already possess strong math capabilities. Preprint, arXiv:2403.04706.
237
+ Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. 2024b. Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions. Hugging Face repository, 13:9.
238
+ Qintong Li, Leyang Cui, Xueliang Zhao, Lingpeng Kong, and Wei Bi. 2024c. Gsm-plus: A comprehensive benchmark for evaluating the robustness of llms as mathematical problem solvers. arXiv preprint arXiv:2402.19255.
239
+ Xian Li, Ping Yu, Chunting Zhou, Timo Schick, Omer Levy, Luke Zettlemoyer, Jason Weston, and Mike Lewis. 2024d. Self-alignment with instruction back-translation. Preprint, arXiv:2308.06259.
240
+ Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. arXiv preprint arXiv:2305.20050.
241
+ Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. 2023. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583.
242
+ Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. 2024. Gsm-symbolic: Understanding the limitations of mathematical reasoning in large language models. Preprint, arXiv:2410.05229.
243
+ Arindam Mitra, Hamed Khanpour, Corby Rosset, and Ahmed Awadallah. 2024. Orca-math: Unlocking the potential of slms in grade school math. arXiv preprint arXiv:2402.14830.
244
+
245
+ Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. Preprint, arXiv:2501.19393.
246
+ OpenAI. 2024a. Introducing openai o1-preview. https://openai.com/index/introducing-openai-o1-preview/.
247
+ OpenAI. 2024b. Learning to reason with llms, september 2024.
248
+ Keiran Paster, Marco Dos Santos, Zhangir Azerbayev, and Jimmy Ba. 2023. Openwebmath: An open dataset of high-quality mathematical web text. Preprint, arXiv:2310.06786.
249
+ Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2023. Measuring and narrowing the compositionality gap in language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 5687-5711.
250
+ Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettle-moyer, Nicola Cancedda, and Thomas Scialom. 2024. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36.
251
+ Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.
252
+ Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314.
253
+ Zhengyang Tang, Xingxing Zhang, Benyou Wang, and Furu Wei. 2024. Mathscale: Scaling instruction tuning for mathematical reasoning. arXiv preprint arXiv:2403.02884.
254
+ Qwen Team. 2024. Qwq: Reflect deeply on the boundaries of the unknown.
255
+ Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 2024. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560.
256
+ Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2022. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171.
257
+ Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. 2023. Self-instruct: Aligning language
258
+
259
+ models with self-generated instructions. Preprint, arXiv:2212.10560.
260
+ Zengzhi Wang, Xuefeng Li, Rui Xia, and Pengfei Liu. 2024. Mathpile: A billion-token-scale pretraining corpus for math. Preprint, arXiv:2312.17120.
261
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022a. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.
262
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022b. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.
263
+ Jiaxin Wen, Jian Guan, Hongning Wang, Wei Wu, and Minlie Huang. 2025. Unlocking reasoning potential in large language models by scaling code-form planning. In The Thirteenth International Conference on Learning Representations.
264
+ Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. 2024. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724.
265
+ Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. 2023. Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244.
266
+ Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. 2024. Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. Preprint, arXiv:2406.08464.
267
+ An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024a. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.
268
+ An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. 2024b. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122.
269
+ Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. 2025. Limo: Less is more for reasoning. Preprint, arXiv:2502.03387.
270
+ Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. 2023. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284.
271
+
272
+ Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T. Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. 2024. Metamath: Bootstrap your own mathematical questions for large language models. Preprint, arXiv:2309.12284.
273
+ Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. 2023. Mammoth: Building math generalist models through hybrid instruction tuning. Preprint, arXiv:2309.05653.
274
+ Xiang Yue, Tuney Zheng, Ge Zhang, and Wenhu Chen. 2024. Mammoth2: Scaling instructions from the web. Preprint, arXiv:2405.03548.
275
+ Hugh Zhang, Jeff Da, Dean Lee, Vaughn Robinson, Catherine Wu, Will Song, Tiffany Zhao, Pranav Raja, Charlotte Zhuang, Dylan Slack, Qin Lyu, Sean Hendryx, Russell Kaplan, Michele Lunati, and Summer Yue. 2024. A careful examination of large language model performance on grade school arithmetic. Preprint, arXiv:2405.00332.
276
+ Xueliang Zhao, Xinting Huang, Wei Bi, and Lingpeng Kong. 2024a. Sego: Sequential subgoal optimization for mathematical problem-solving. In The 62nd Annual Meeting of the Association for Computational Linguistics (11/08/2024-16/08/2024, Bangkok, Thailand).
277
+ Xueliang Zhao, Xinting Huang, Tingchen Fu, Qintong Li, Shansan Gong, Lemao Liu, Wei Bi, and Lingpeng Kong. 2024b. Bba: Bi-modal behavioral alignment for reasoning with large vision-language models. In ACL (Findings).
278
+ Xueliang Zhao, Wenda Li, and Lingpeng Kong. 2024c. Subgoal-based demonstration learning for formal theorem proving. In Forty-first International Conference on Machine Learning.
279
+ Xueliang Zhao, Lin Zheng, Haige Bo, Changran Hu, Urmish Thakker, and Lingpeng Kong. 2024d. Subgoalxl: Subgoal-based expert learning for theorem proving. arXiv preprint arXiv:2408.11172.
280
+
281
+ # A Instruction for Concept Extraction
282
+
283
+ The following prompt extracts domain-specific concepts from each seed prompt. This instruction directs the large language model to identify the salient concepts underlying the given mathematical problem.
284
+
285
+ # Concept Extraction Prompt
286
+
287
+ As an expert in educational assessment, analyze this problem: {problem}
288
+
289
+ Break down and identify {num_concepts} foundational concepts being tested. List these knowledge points that:
290
+
291
+ - Are core curriculum concepts typically taught in standard courses,
292
+ - Are precise and measurable (not vague like "understanding math"),
293
+ - Are essential building blocks needed to solve this problem,
294
+ - Represent fundamental principles rather than problem-specific techniques.
295
+
296
+ Think through your analysis step by step, then format your response as a Python code snippet containing a list of {num_concepts} strings, where each string clearly describes one fundamental knowledge point.
297
+
298
+ # B Instruction for Rationale Generation
299
+
300
+ The following prompt is used to guide the large language model in generating a detailed thought process that serves as the rationale for prompt generation. Note that we emphasize two requirements with "IMPORTANT" markers in the prompt, aiming to enhance $p(x|z,\mathbf{c})$ and $p(z|\mathbf{c})$ , respectively.
301
+
302
+ # Rationale Generation Instruction
303
+
304
+ Imagine you are an expert in educational problem design.
305
+
306
+ You will be shown these components:
307
+
308
+ Problem: {problem}
309
+
310
+ Fundamental Concepts: {list of concepts}
311
+
312
+ Difficulty Level: {difficulty_level}
313
+
314
+ Your task is to reverse-engineer a clear thinking process that shows how a teacher might design this problem. This thinking process should:
315
+
316
+ - Show how combining the given foundational concepts naturally leads to a problem at the specified difficulty level.
317
+ - Include all key decisions and reasoning that shaped the problem design.
318
+ - (IMPORTANT) Be so precise and detailed that another teacher following these exact steps would recreate the identical problem.
319
+ - (IMPORTANT) Be so natural and logical that another teacher could derive the same thinking process using only the foundational concepts and difficulty level.
320
+
321
+ Present your answer after "Thinking Process:" with the complete step-by-step thinking process described above.
322
+
323
+ # Evaluation Prompt
324
+
325
+ As a critical expert in educational problem design, evaluate the following problem components:
326
+
327
+ === GIVEN MATERIALS ===
328
+
329
+ - 1. Problem & Design Rationale: {rationale_and_problem} (The rationale describes the author's thinking process and justification in designing this problem)
330
+ - 2. Foundational Concepts: {concept_text}
331
+ - 3. Target Difficulty Level: {level}
332
+
333
+ === EVALUATION CRITERIA ===
334
+
335
+ Rate each criterion as: [Perfect | Acceptable | Bad]
336
+
337
+ 1. FORMAT
338
+
339
+ - Verify correct implementation of markup tags:
340
+ - <!-- BEGIN RATIONALE --> [design thinking process] <!-- END RATIONALE -->
341
+ - <!-- BEGIN PROBLEM --> [problem] <!-- END PROBLEM -->
342
+
343
+ 2. FACTUAL ACCURACY
344
+
345
+ - Check for any incorrect or misleading information in both problem and rationale
346
+ - Verify mathematical, scientific, or logical consistency
347
+
348
+ 3. DIFFICULTY ALIGNMENT
349
+
350
+ - Assess if problem complexity matches the specified difficulty level
351
+ - Evaluate if cognitive demands align with target level
352
+
353
+ 4. CONCEPT COVERAGE
354
+
355
+ - Evaluate how well the problem incorporates the given foundational concepts
356
+ - Check for missing concept applications
357
+
358
+ 5. SOLVABILITY
359
+
360
+ - Verify if the problem has at least one valid solution
361
+ - Check if all necessary information for solving is provided
362
+
363
+ === RESPONSE FORMAT ===
364
+
365
+ For each criterion, provide:
366
+
367
+ - Rating: [Perfect | Acceptable | Bad]
368
+ - Justification: Clear explanation for the rating
369
+
370
+ === FINAL VERDICT ===
371
+
372
+ After providing all criterion evaluations, conclude your response with:
373
+
374
+ 'Final Judgement: [verdict]'
375
+
376
+ where verdict must be one of:
377
+
378
+ - 'perfect' (if both FACTUAL ACCURACY and SOLVABILITY are Perfect, at least two other criteria are Perfect, and no Bad ratings)
379
+ - 'acceptable' (if no Bad ratings and doesn't qualify for perfect)
380
+ - 'bad' (if ANY Bad ratings)
381
+
382
+ Note: The 'Final Judgement: [verdict]' line must be the final line of your response.
383
+
384
+ # D Proof of the Optimal Variational Distribution
385
+
386
+ In this section, we provide a rigorous derivation of the optimal variational distribution $q^{\star}(z \mid \mathbf{c}, x)$ that maximizes the evidence lower bound (ELBO)
387
+
388
+ $$
389
+ \mathcal {L} (q) = \mathbb {E} _ {q (z | \mathbf {c}, x)} \left[ \log \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)} \right]
390
+ $$
391
+
392
+ subject to the normalization constraint
393
+
394
+ $$
395
+ \sum_ {z} q (z \mid \mathbf {c}, x) = 1.
396
+ $$
397
+
398
+ We wish to maximize the functional
399
+
400
+ $$
401
+ \mathcal {L} (q) = \sum_ {z} q (z \mid \mathbf {c}, x) \log \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)},
402
+ $$
403
+
404
+ with respect to $q(z\mid \mathbf{c},x)$ , subject to
405
+
406
+ $$
407
+ \sum_ {z} q (z \mid \mathbf {c}, x) = 1.
408
+ $$
409
+
410
+ To incorporate the constraint, we define the Lagrangian functional
411
+
412
+ $$
413
+ \mathcal {J} (q, \lambda) = \sum_ {z} q (z | \mathbf {c}, x) \log \frac {p (x , z | \mathbf {c})}{q (z | \mathbf {c} , x)} + \lambda \left(\sum_ {z} q (z | \mathbf {c}, x) - 1\right),
414
+ $$
415
+
416
+ where $\lambda$ is a Lagrange multiplier.
417
+
418
+ For each $z$ , we take the derivative of $\mathcal{J}(q,\lambda)$ with respect to $q(z\mid \mathbf{c},x)$ . Using standard calculus of variations, we obtain:
419
+
420
+ $$
421
+ \frac {\partial \mathcal {J}}{\partial q (z \mid \mathbf {c} , x)} = \log \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)} - 1 + \lambda .
422
+ $$
423
+
424
+ Setting this derivative to zero for optimality, we have
425
+
426
+ $$
427
+ \log \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)} - 1 + \lambda = 0.
428
+ $$
429
+
430
+ Rearrange the above equation to isolate $q(z \mid \mathbf{c}, x)$ :
431
+
432
+ $$
433
+ \log \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)} = 1 - \lambda .
434
+ $$
435
+
436
+ Exponentiating both sides yields
437
+
438
+ $$
439
+ \frac {p (x , z \mid \mathbf {c})}{q (z \mid \mathbf {c} , x)} = e ^ {1 - \lambda},
440
+ $$
441
+
442
+ or equivalently,
443
+
444
+ $$
445
+ q (z \mid \mathbf {c}, x) = p (x, z \mid \mathbf {c}) e ^ {- (1 - \lambda)}.
446
+ $$
447
+
448
+ We now enforce the normalization constraint:
449
+
450
+ $$
451
+ \sum_ {z} q (z \mid \mathbf {c}, x) = e ^ {- (1 - \lambda)} \sum_ {z} p (x, z \mid \mathbf {c}) = e ^ {- (1 - \lambda)} p (x \mid \mathbf {c}) = 1.
452
+ $$
453
+
454
+ Solving for $e^{-(1 - \lambda)}$ , we obtain
455
+
456
+ $$
457
+ e ^ {- (1 - \lambda)} = \frac {1}{p (x \mid \mathbf {c})}.
458
+ $$
459
+
460
+ Substituting this result back into the expression for $q(z \mid \mathbf{c}, x)$ , we arrive at the optimal variational distribution:
461
+
462
+ $$
463
+ q ^ {\star} (z \mid \mathbf {c}, x) = \frac {p (x , z \mid \mathbf {c})}{p (x \mid \mathbf {c})}.
464
+ $$
465
+
466
+ This completes the proof.
467
+
468
+ # E Case Study
469
+
470
+ To further demonstrate the effectiveness of PROMPTCOT, we visualize the step-by-step rationale generation and the corresponding problem produced by our problem generation model (as detailed in §2.3) in Figure 4 and 5, highlighting how the rationale is grounded in the problem's core elements and guides the model towards generating Olympiad-level problems.
471
+
472
+ Figures 6 - 10 show the solutions generated by PROMPTCOT-Qwen-7B and PROMPTCOT-DS-7B for various problem sources.
473
+
474
+ # Foundational Concepts:
475
+
476
+ 1. Geometric arrangements and intersection points, including the concept of interior points created by intersecting lines
477
+ 2. Understanding of expected value in probability theory
478
+ 3. Understanding of ratios and proportions, particularly in the context of comparing areas of geometric shapes
479
+ 4. Ability to apply algebraic manipulations, such as solving linear equations and simplifying expressions, to solve problems
480
+ 5. Knowledge of integer arithmetic and the properties of integers, including powers of 2
481
+
482
+ # Rationale:
483
+
484
+ # Step 1: Concept Selection and Combination
485
+
486
+ To design a problem at the AMC12 difficulty level, we need to combine multiple foundational concepts in a way that creates a challenging yet solvable problem. We start by selecting concepts that naturally fit together. In this case, we choose:
487
+
488
+ - Geometric arrangements and intersection points (Concept 1)
489
+ - Expected value in probability theory (Concept 2)
490
+ - Ratios and proportions (Concept 3)
491
+ - Algebraic manipulations (Concept 4)
492
+ - Integer arithmetic and properties of integers (Concept 5)
493
+
494
+ We want to create a problem that requires the student to apply these concepts in a cohesive and logical manner.
495
+
496
+ # Step 2: Problem Theme and Context
497
+
498
+ To make the problem more engaging, we decide to use a geometric theme involving intersecting lines. We choose a square as the geometric shape, as it is simple and familiar to students. We also introduce the idea of interior points created by intersecting lines, which will be used to calculate expected values.
499
+
500
+ # Step 3: Problem Statement and Parameters
501
+
502
+ We define the problem statement: "For each interior point $P$ of a square, let $a(P)$ denote the number of distinct intersection points of the lines that pass through $P$ and the sides of the square." This statement introduces the concept of intersection points and sets the stage for the problem.
503
+
504
+ To add complexity, we introduce two specific points, $A$ and $B$ , and ask for the ratio $\frac{a(A)}{a(B)}$ . This requires students to apply their understanding of ratios and proportions (Concept 3).
505
+
506
+ # Step 4: Expected Value and Probability
507
+
508
+ To incorporate expected value (Concept 2), we introduce the idea of a randomly chosen point $P$ . We want students to think about the expected number of intersection points for a randomly chosen point, which will be used to calculate the ratio $\frac{a(A)}{a(B)}$ .
509
+
510
+ # Step 5: Algebraic Manipulations and Integer Arithmetic
511
+
512
+ To make the problem more challenging, we decide to ask for the sum of the digits of the ratio $\frac{a(A)}{a(B)}$ . This requires students to apply algebraic manipulations (Concept 4) and integer arithmetic (Concept 5) to simplify the expression and find the sum of its digits.
513
+
514
+ # Step 6: Difficulty Level and Constraints
515
+
516
+ To ensure the problem is at the AMC12 difficulty level, we need to balance the complexity of the concepts and the calculations required. We decide to keep the problem statement concise and focused on the key concepts, rather than introducing additional complexity.
517
+
518
+ # Step 7: Finalizing the Problem
519
+
520
+ After considering the above steps, we finalize the problem:
521
+
522
+ "For each interior point $P$ of a square, let $a(P)$ denote the number of distinct intersection points of the lines that pass through $P$ and the sides of the square. For example, $a(P)$ has the values shown below for some points $P$ in the square. What is the sum of the digits of $\frac{a(A)}{a(B)}$ ?"
523
+
524
+ This problem requires students to apply a range of concepts in a logical and cohesive manner, making it suitable for the AMC12 difficulty level.
525
+
526
+ # Final Problem:
527
+
528
+ For each interior point $P$ of a square, let $a(P)$ denote the number of distinct intersection points of the lines that pass through $P$ and the sides of the square. For example, $a(P)$ has the values shown below for some points $P$ in the square. [Diagram showing a square with points $A, B, C, D$ and their corresponding $a(P)$ values] What is the sum of the digits of $\frac{a(A)}{a(B)}$ ?
529
+
530
+ Figure 4: Case study of rationale and problem generation.
531
+
532
+ # Foundational Concepts:
533
+
534
+ 1. Skill in solving inequalities involving integers
535
+ 2. Ability to recognize and analyze patterns in number sequences, including multiples of a number
536
+ 3. Understanding of the unit circle and the periodicity of trigonometric functions, especially within the interval from 0 to $2\pi$
537
+ 4. Understanding of the concept of collinearity, including the ability to determine whether three points lie on the same line in 3D space
538
+ 5. Ability to set up and solve algebraic equations to represent and solve problems
539
+
540
+ # Rationale:
541
+
542
+ # Step 1: Selecting Foundational Concepts and Difficulty Level
543
+
544
+ I start by selecting the foundational concepts and the desired difficulty level for the problem. In this case, I choose the concepts of solving inequalities involving integers, recognizing patterns in number sequences, understanding the unit circle and periodicity of trigonometric functions, understanding collinearity, and setting up and solving algebraic equations. The desired difficulty level is HMMT-Nov, which suggests a challenging problem that requires careful analysis and application of multiple concepts.
545
+
546
+ # Step 2: Brainstorming Ideas
547
+
548
+ Next, I brainstorm ideas that combine the selected concepts. I consider problems involving sequences of integers, trigonometric functions, and 3D geometry. I think about how these concepts can be connected, and I start to form ideas about problems that could require the application of multiple concepts.
549
+
550
+ # Step 3: Focusing on Collinearity and Trigonometric Functions
551
+
552
+ I decide to focus on the concept of collinearity in 3D space, as it is a rich area for problem design. I think about how I can use trigonometric functions to create a scenario where three points are collinear. I consider the unit circle and the periodicity of trigonometric functions, and I realize that I can use the fact that the cosine and sine functions are periodic with a period of $2\pi$ to create a repeating pattern.
553
+
554
+ # Step 4: Introducing a Sequence of Integers
555
+
556
+ To incorporate the concept of sequences of integers, I decide to introduce a sequence $a_{n}$ that is defined in terms of $n$ . I consider how I can use the fact that $a_{n}$ is an integer to create a problem that requires careful analysis of the sequence. I think about how I can use the sequence to create a scenario where the three points are collinear.
557
+
558
+ # Step 5: Creating a Scenario with Collinear Points
559
+
560
+ I decide to create a scenario where three points are collinear in 3D space, and the coordinates of the points are defined in terms of the sequence $a_{n}$ . I think about how I can use the fact that the cosine and sine functions are periodic with a period of $2\pi$ to create a repeating pattern, and I realize that I can use this to create a scenario where the three points are collinear. I decide to use the coordinates $(x_{n},y_{n},z_{n})$ , where $x_{n} = \cos (a_{n})$ , $y_{n} = \sin (a_{n})$ , and $z_{n} = n$ .
561
+
562
+ # Step 6: Defining the Problem
563
+
564
+ I define the problem as finding the number of integers $1 \leq k \leq 2017$ such that the points $(x_{k}, y_{k}, z_{k})$ , $(x_{k+1}, y_{k+1}, z_{k+1})$ , and $(x_{k+2}, y_{k+2}, z_{k+2})$ all lie on the same line. I realize that this problem requires the application of multiple concepts, including solving inequalities involving integers, recognizing patterns in number sequences, understanding the unit circle and periodicity of trigonometric functions, understanding collinearity, and setting up and solving algebraic equations.
565
+
566
+ # Step 7: Finalizing the Problem
567
+
568
+ I finalize the problem by specifying the interval $0 \leq a_{n} < 2\pi$ and the condition that $a_{n}$ is an integer. I realize that this condition ensures that the problem is well-defined and that the solution can be found using careful analysis of the sequence $a_{n}$ . I am confident that the problem is at the desired difficulty level, and I am excited to see how students will approach the problem.
569
+
570
+ # Final Problem:
571
+
572
+ Let $a_1, a_2, \ldots$ be a sequence of integers such that $0 \leq a_n < 2\pi$ for all $n$ , and such that there exists a positive integer $M$ for which $a_n = a_{n + M}$ for all $n$ . For how many integers $1 \leq k \leq 2017$ do there exist real numbers $x_k, y_k, z_k$ such that the points $(x_k, y_k, z_k), (x_{k + 1}, y_{k + 1}, z_{k + 1})$ , and $(x_{k + 2}, y_{k + 2}, z_{k + 2})$ all lie on the same line, and such that $x_k = \cos(a_k)$ , $y_k = \sin(a_k)$ , and $z_k = k$ ?
573
+
574
+ Figure 5: Case study of rationale and problem generation.
575
+
576
+ # Problem:
577
+
578
+ Janet's ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?
579
+
580
+ # Thought Process:
581
+
582
+ <think>
583
+
584
+ Okay, so I need to figure out how much Janet makes every day from selling her duck eggs at the farmers' market. Let me try to break this down step by step.
585
+
586
+ First, the problem says that Janet's ducks lay 16 eggs per day. That seems straightforward. So, she has 16 eggs each day that she can potentially sell or use for something else.
587
+
588
+ Next, she eats three eggs for breakfast every morning. Hmm, so every day, she consumes 3 eggs. That means those 3 eggs aren't available for sale anymore. So, I should subtract those 3 eggs from the total number of eggs she lays each day. Let me write that down:
589
+
590
+ Total eggs laid: 16
591
+
592
+ Eggs eaten for breakfast: 3
593
+
594
+ So, eggs left after breakfast: $16 - 3 = 13$
595
+
596
+ Wait, hold on. Is that all? Because she also bakes muffins for her friends every day using four eggs. So, she uses another 4 eggs for baking muffins. That means I need to subtract those 4 eggs as well. Let me adjust my calculation:
597
+
598
+ Eggs used for muffins: 4
599
+
600
+ So, total eggs used: 3 (eaten) + 4 (baked) = 7
601
+
602
+ Therefore, the remaining eggs she can sell would be the total eggs minus the ones she ate and minus the ones she used for muffins. Let me compute that:
603
+
604
+ Eggs sold = Total eggs - Eggs eaten - Eggs used for muffins
605
+
606
+ Eggs sold $= 16 - 3 - 4 = 9$
607
+
608
+ So, she has 9 eggs left to sell each day. Now, she sells each fresh duck egg for $2 at the farmers' market. To find out how much money she makes daily, I need to multiply the number of eggs she sells by the price per egg.
609
+
610
+ Money made $=$ Eggs sold $\times$ Price per egg
611
+
612
+ Money made = 9 × $2 = $18
613
+
614
+ Let me just double-check my calculations to make sure I didn't miss anything. She starts with 16 eggs. She eats 3, which leaves her with 13. Then she uses 4 for muffins, leaving her with 9. She sells each of those 9 for $2, so $9 \times 2$ is indeed 18. Is there any other detail I might have overlooked? The problem mentions she lays 16 eggs per day, eats 3 for breakfast, bakes muffins with 4, and sells the rest. There doesn't seem to be any other usage or expense mentioned. So, it looks like all the eggs are either consumed or sold, no leftover eggs or additional costs.
615
+
616
+ Therefore, I think my calculation is correct. She makes $18 each day from selling her duck eggs at the farmers' market.
617
+
618
+ # Final Answer
619
+
620
+ Janet makes $\boxed{18}$ dollars every day at the farmers' market.
621
+
622
+ </think>
623
+
624
+ # Solution:
625
+
626
+ Janet's ducks lay 16 eggs per day. She eats 3 eggs for breakfast every morning and uses 4 eggs to bake muffins daily. The remaining eggs are sold at the farmers' market for $2 per egg.
627
+
628
+ 1. Total eggs laid: 16
629
+ 2. Eggs eaten for breakfast: 3
630
+ 3. Eggs used for muffins: 4
631
+ 4. Eggs sold: $16 - 3 - 4 = 9$
632
+ 5. Money made from selling eggs: $9 \times 2 = 18$
633
+
634
+ Thus, Janet makes $\boxed{18}$ dollars every day at the farmers' market.
635
+
636
+ Figure 6: Solution generated by PROMPTcot-DS-7B for a GSM8K problem.
637
+
638
+ # Problem:
639
+
640
+ Jen enters a lottery by picking 4 distinct numbers from $S = \{1,2,3,\dots ,9,10\}$ . 4 numbers are randomly chosen from $S$ . She wins a prize if at least two of her numbers were 2 of the randomly chosen numbers, and wins the grand prize if all four of her numbers were the randomly chosen numbers. The probability of her winning the grand prize given that she won a prize is $\frac{m}{n}$ where $m$ and $n$ are relatively prime positive integers. Find $m + n$ .
641
+
642
+ # Solution:
643
+
644
+ # 1. Calculate the total number of ways to choose 4 numbers from $S$ :
645
+
646
+ The set $S$ contains 10 elements, so the number of ways to choose 4 distinct numbers from $S$ is:
647
+
648
+ $$
649
+ \binom{10}{4} = \frac{10!}{4!\,6!} = 210
650
+ $$
651
+
652
+ # 2. Calculate the number of ways Jen can win a prize:
653
+
654
+ Jen wins a prize if at least 2 of her numbers match the randomly chosen numbers. We will use the complement rule to find this. First, we calculate the number of ways she can win no prize or exactly 1 prize.
655
+
656
+ # Number of ways she wins no prize:
657
+
658
+ She must choose 4 numbers from the 6 numbers that are not the randomly chosen numbers. The number of ways to do this is:
659
+
660
+ $$
661
+ \binom{6}{4} = \frac{6!}{4!\,2!} = 15
662
+ $$
663
+
664
+ # Number of ways she wins exactly 1 prize:
665
+
666
+ She must choose 1 number from the 4 randomly chosen numbers and 3 numbers from the 6 numbers that are not the randomly chosen numbers. The number of ways to do this is:
667
+
668
+ $$
669
+ \binom{4}{1} \binom{6}{3} = 4 \cdot \frac{6!}{3!\,3!} = 4 \cdot 20 = 80
670
+ $$
671
+
672
+ Therefore, the number of ways she wins no prize or exactly 1 prize is:
673
+
674
+ $$
675
+ 1 5 + 8 0 = 9 5
676
+ $$
677
+
678
+ So, the number of ways she wins a prize is:
679
+
680
+ $$
681
+ 2 1 0 - 9 5 = 1 1 5
682
+ $$
683
+
684
+ # 3. Calculate the number of ways Jen can win the grand prize:
685
+
686
+ Jen wins the grand prize if all 4 of her numbers match the randomly chosen numbers. The number of ways to do this is:
687
+
688
+ $$
689
+ \binom{4}{4} = 1
690
+ $$
691
+
692
+ # 4. Calculate the probability of winning the grand prize given that she won a prize:
693
+
694
+ This is the ratio of the number of ways she can win the grand prize to the number of ways she can win a prize:
695
+
696
+ $$
697
+ \frac {1}{1 1 5}
698
+ $$
699
+
700
+ Since 1 and 115 are relatively prime, the probability is in its simplest form. Therefore, $m = 1$ and $n = 115$ , and the sum $m + n$ is:
701
+
702
+ $$
703
+ 1 + 1 1 5 = 1 1 6
704
+ $$
705
+
706
+ The final answer is:
707
+
708
+ 116
709
+
710
+ Figure 7: Solution generated by PROMPTcot-Qwen-7B for an AIME2024 problem.
711
+
712
+ # Problem:
713
+
714
+ There exist real numbers $x$ and $y$ , both greater than 1, such that $\log_x(y^x) = \log_y(x^{4y}) = 10$ . Find $xy$ .
715
+
716
+ # Solution:
717
+
718
+ To solve the problem, we start with the given equations:
719
+
720
+ $$
721
+ \log_x(y^x) = 10 \quad \text{and} \quad \log_y(x^{4y}) = 10
722
+ $$
723
+
724
+ # Step 1: Convert logarithmic equations to exponential form
725
+
726
+ For the first equation, $\log_x(y^x) = 10$ , we have:
727
+
728
+ $$
729
+ x ^ {1 0} = y ^ {x}
730
+ $$
731
+
732
+ For the second equation, $\log_y(x^{4y}) = 10$ , we have:
733
+
734
+ $$
735
+ y ^ {1 0} = x ^ {4 y}
736
+ $$
737
+
738
+ # Step 2: Take natural logarithm of both sides
739
+
740
+ Starting with $x^{10} = y^x$ , we get:
741
+
742
+ $$
743
+ 1 0 \ln x = x \ln y
744
+ $$
745
+
746
+ Rearranging this equation, we obtain:
747
+
748
+ $$
749
+ \frac {\ln y}{\ln x} = \frac {1 0}{x}
750
+ $$
751
+
752
+ Taking the natural logarithm of both sides of $y^{10} = x^{4y}$ , we get:
753
+
754
+ $$
755
+ 1 0 \ln y = 4 y \ln x
756
+ $$
757
+
758
+ Rearranging this equation, we obtain:
759
+
760
+ $$
761
+ {\frac {\ln y}{\ln x}} = {\frac {4 y}{1 0}} = {\frac {2 y}{5}}
762
+ $$
763
+
764
+ # Step 3: Set equal expressions
765
+
766
+ Since both expressions are equal to $\frac{\ln y}{\ln x}$ , we can set them equal to each other:
767
+
768
+ $$
769
+ \frac {1 0}{x} = \frac {2 y}{5}
770
+ $$
771
+
772
+ # Step 4: Solve for $xy$
773
+
774
+ Cross-multiplying gives:
775
+
776
+ $$
777
+ 5 0 = 2 x y
778
+ $$
779
+
780
+ Solving for $xy$ , we get:
781
+
782
+ $$
783
+ x y = 2 5
784
+ $$
785
+
786
+ Thus, the value of $xy$ is $\boxed{25}$ .
787
+
788
+ Figure 8: Solution generated by PROMPTCoT-Qwen-7B for an AIME2024 problem.
789
+
790
+ # Problem:
791
+
792
+ Alice chooses a set $A$ of positive integers. Then Bob lists all finite nonempty sets $B$ of positive integers with the property that the maximum element of $B$ belongs to $A$ . Bob's list has 2024 sets. Find the sum of the elements of $A$ .
793
+
794
+ # Solution:
795
+
796
+ Let's denote the elements of $A$ as $a_1, a_2, \ldots, a_n$ where $a_1 < a_2 < \dots < a_n$ .
797
+
798
+ # Step 1: Analyze the sets $B$
799
+
800
+ For each element $a_i$ in $A$ , the sets $B$ that have $a_i$ as their maximum element are all subsets of $\{1, 2, \dots, a_i\}$ that include $a_i$ . The number of such subsets is $2^{a_i - 1}$ because we can choose any subset of $\{1, 2, \dots, a_i - 1\}$ and add $a_i$ to it.
801
+
802
+ # Step 2: Set up the equation
803
+
804
+ The total number of sets $B$ is the sum of $2^{a_i - 1}$ for all $i$ from 1 to $n$ :
805
+
806
+ $$
807
+ 2 ^ {a _ {1} - 1} + 2 ^ {a _ {2} - 1} + \dots + 2 ^ {a _ {n} - 1} = 2 0 2 4
808
+ $$
809
+
810
+ # Step 3: Convert 2024 to binary
811
+
812
+ $$
813
+ 2024_{10} = 11111101000_{2}
814
+ $$
815
+
816
+ This binary representation tells us that:
817
+
818
+ $$
819
+ 2 0 2 4 = 2 ^ {1 0} + 2 ^ {9} + 2 ^ {8} + 2 ^ {7} + 2 ^ {6} + 2 ^ {5} + 2 ^ {3}
820
+ $$
821
+
822
+ # Step 4: Identify the elements of $A$
823
+
824
+ From the binary representation, we can identify:
825
+
826
+ $$
827
+ \begin{array}{l} a _ {1} - 1 = 3 \\ a _ {2} - 1 = 5 \\ a _ {3} - 1 = 6 \\ a _ {4} - 1 = 7 \\ a _ {5} - 1 = 8 \\ a _ {6} - 1 = 9 \\ a _ {7} - 1 = 1 0 \\ \end{array}
828
+ $$
829
+
830
+ Therefore, the elements of $A$ are:
831
+
832
+ $$
833
+ a _ {1} = 4, a _ {2} = 6, a _ {3} = 7, a _ {4} = 8, a _ {5} = 9, a _ {6} = 1 0, a _ {7} = 1 1
834
+ $$
835
+
836
+ # Step 5: Calculate the sum
837
+
838
+ The sum of the elements of $A$ is:
839
+
840
+ $$
841
+ 4 + 6 + 7 + 8 + 9 + 1 0 + 1 1 = 5 5
842
+ $$
843
+
844
+ Thus, the sum of the elements of $A$ is $\boxed{55}$ .
845
+
846
+ Figure 9: Solution generated by PROMPTCOT-Qwen-7B for an AIME2024 problem.
847
+
848
+ # Problem:
849
+
850
+ Find the largest possible real part of $(75 + 117i)z + \frac{96 + 144i}{z}$ where $z$ is a complex number with $|z| = 4$ .
851
+
852
+ # Solution:
853
+
854
+ # Step 1: Express $z$ in polar form
855
+
856
+ Let $z = 4e^{i\theta} = 4(\cos \theta + i\sin \theta)$
857
+
858
+ Then $\frac{1}{z} = \frac{1}{4e^{i\theta}} = \frac{1}{4} e^{-i\theta} = \frac{1}{4} (\cos \theta - i\sin \theta)$
859
+
860
+ # Step 2: Expand first term
861
+
862
+ $$
863
+ \begin{array}{l} (7 5 + 1 1 7 i) \cdot 4 (\cos \theta + i \sin \theta) \\ = 4 (7 5 \cos \theta + 7 5 i \sin \theta + 1 1 7 i \cos \theta + 1 1 7 i ^ {2} \sin \theta) \\ = 4 (7 5 \cos \theta + 7 5 i \sin \theta + 1 1 7 i \cos \theta - 1 1 7 \sin \theta) \\ = 4 ((7 5 \cos \theta - 1 1 7 \sin \theta) + (7 5 \sin \theta + 1 1 7 \cos \theta) i) \\ = (3 0 0 \cos \theta - 4 6 8 \sin \theta) + (3 0 0 \sin \theta + 4 6 8 \cos \theta) i \\ \end{array}
864
+ $$
865
+
866
+ # Step 3: Expand second term
867
+
868
+ $$
869
+ \begin{array}{l} (9 6 + 1 4 4 i) \cdot \frac {1}{4} (\cos \theta - i \sin \theta) \\ = \frac {1}{4} (9 6 \cos \theta - 9 6 i \sin \theta + 1 4 4 i \cos \theta - 1 4 4 i ^ {2} \sin \theta) \\ = \frac {1}{4} (9 6 \cos \theta - 9 6 i \sin \theta + 1 4 4 i \cos \theta + 1 4 4 \sin \theta) \\ = \frac {1}{4} ((9 6 \cos \theta + 1 4 4 \sin \theta) + (- 9 6 \sin \theta + 1 4 4 \cos \theta) i) \\ = (2 4 \cos \theta + 3 6 \sin \theta) + (- 2 4 \sin \theta + 3 6 \cos \theta) i \\ \end{array}
870
+ $$
871
+
872
+ # Step 4: Add terms
873
+
874
+ $$
875
+ \begin{array}{l} (3 0 0 \cos \theta - 4 6 8 \sin \theta + 2 4 \cos \theta + 3 6 \sin \theta) \\ + (3 0 0 \sin \theta + 4 6 8 \cos \theta - 2 4 \sin \theta + 3 6 \cos \theta) i \\ = (3 2 4 \cos \theta - 4 3 2 \sin \theta) + (3 2 4 \sin \theta + 5 0 4 \cos \theta) i \\ \end{array}
876
+ $$
877
+
878
+ # Step 5: Find maximum real part
879
+
880
+ The real part is $324\cos \theta - 432\sin \theta$
881
+
882
+ Maximum value of $a\cos \theta +b\sin \theta$ is $\sqrt{a^2 + b^2}$
883
+
884
+ Here, $a = 324$ and $b = -432$
885
+
886
+ $$
887
+ \begin{array}{l} \text{Maximum value} = \sqrt{324^2 + (-432)^2} \\ = \sqrt{104976 + 186624} \\ = \sqrt{291600} \\ = 540 \\ \end{array}
888
+ $$
889
+
890
+ Therefore, the largest possible real part is $\boxed{540}$ .
891
+
892
+ Figure 10: Solution generated by PROMPTCOT-Qwen-7B for an AIME2024 problem.
data/2025/2503_02xxx/2503.02324/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ee1de407df455d4adae86662dee03229cb90d7409950cd0fe27b24d5634efd1
3
+ size 516274
data/2025/2503_02xxx/2503.02324/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02357/cde5e469-d77b-49c6-a6a3-634c026a56f4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90801eb4333c08de961e70b067fbb8028047994aeb4e94e2f877a5545fc0b20b
3
+ size 3268263
data/2025/2503_02xxx/2503.02357/full.md ADDED
@@ -0,0 +1,453 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Q-Eval-100K: Evaluating Visual Quality and Alignment Level for Text-to-Vision Content
2
+
3
+ Zicheng Zhang $^{1}$ Tengchuan Kou $^{1}$ Shushi Wang $^{1}$ Chunyi Li $^{1}$ Wei Sun $^{1}$ Wei Wang $^{2}$ Xiaoyu Li $^{2}$ Zongyu Wang $^{2}$ Xuezhi Cao $^{2}$ Xiongkuo Min $^{1}$ Xiaohong Liu $^{1\dagger}$ Guangtao Zhai $^{1}$ Shanghai Jiao Tong University ${}^{2}$ Meituan
4
+
5
+ # Abstract
6
+
7
+ Evaluating text-to-vision content hinges on two crucial aspects: visual quality and alignment. While significant progress has been made in developing objective models to assess these dimensions, the performance of such models heavily relies on the scale and quality of human annotations. According to Scaling Law, increasing the number of human-labeled instances follows a predictable pattern that enhances the performance of evaluation models. Therefore, we introduce a comprehensive dataset designed to Evaluate Visual quality and Alignment Level for text-to-vision content (Q-EVAL-100K), featuring the largest collection of human-labeled Mean Opinion Scores (MOS) for the mentioned two aspects. The Q-EVAL-100K dataset encompasses both text-to-image and text-to-video models, with 960K human annotations specifically focused on visual quality and alignment for 100K instances (60K images and 40K videos). Leveraging this dataset with context prompt, we propose Q-Eval-Score, a unified model capable of evaluating both visual quality and alignment with special improvements for handling long-text prompt alignment. Experimental results indicate that the proposed Q-Eval-Score achieves superior performance on both visual quality and alignment, with strong generalization capabilities across other benchmarks. These findings highlight the significant value of the Q-EVAL-100K dataset. Data and codes will be available at https://github.com/zzc-1998/Q-Eval.
8
+
9
+ # 1. Introduction
10
+
11
+ With the rapid advancement of generative AI, millions of text-to-image and text-to-video content are being generated daily across various platforms [26, 57], applied in industrial production or directly used by consumers. However, due to current technological limitations, text-to-vision con
12
+
13
+ ![](images/924c059b51e0c4c07b8d0ed180972c6e8eff2be9520ff3c294f5e41f4aa13099.jpg)
14
+ Figure 1. Illustration of the unified evaluation dimensions of Q-Eval-100K. We focus on visual quality (including all factors that may impact the viewing experience) and alignment level, which measures the accuracy of the generated content to the prompt.
15
+
16
+ tent often falls short of being perfect upon generation and cannot be immediately deployed [35, 37, 41], which usually requires expert evaluation, editing, and fine-tuning. As a result, numerous efforts have been made to develop automated methods for evaluating text-to-vision content, aiming to control the quality of generation and guide the necessary optimizations effectively [14, 27, 28, 30, 41, 61-64, 66, 71, 73]. Through extensive theoretical and experimental analy
17
+
18
+ Table 1. A brief comparison of the latest text-to-vision evaluation datasets (I. for image, V. for video). For Annotation Type, SBS (side-by-side) and MOS (mean-opinion-score) refer to selecting the preferred instance from a pair of instances and assigning an average absolute score to a single instance respectively. For Rating Concerns, Overall indicates assigning scores from a holistic perspective while $\checkmark$ denotes assigning separate scores to quality or alignment. For Number, Ins. and Ann. stand for the number of instances and human annotations respectively.
19
+
20
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">Year</td><td rowspan="2">Content</td><td rowspan="2">Annotation Type (Single/Pair)</td><td colspan="2">Evaluation Concern</td><td rowspan="2">Number Ins./Ann.</td></tr><tr><td>Quality</td><td>Alignment</td></tr><tr><td>Pick-a-pic [28]</td><td>2023</td><td>I.</td><td>SBS</td><td colspan="2">Overall</td><td>1M/500K</td></tr><tr><td>ImageReward [66]</td><td>2023</td><td>I.</td><td>SBS</td><td colspan="2">Overall</td><td>68k/137k</td></tr><tr><td>HPDv2 [64]</td><td>2023</td><td>I.</td><td>SBS</td><td colspan="2">Overall</td><td>430k/645K</td></tr><tr><td>AGIQA-3k [35]</td><td>2023</td><td>I.</td><td>MOS</td><td>✓</td><td>✓</td><td>3K/81K</td></tr><tr><td>AIGCQA2023 [59]</td><td>2023</td><td>I.</td><td>MOS</td><td>✓</td><td>✓</td><td>2K/17K</td></tr><tr><td>PKU-AIGQA-4k [68]</td><td>2024</td><td>I.</td><td>MOS</td><td>✓</td><td>✓</td><td>4K/84K</td></tr><tr><td>AIGQA-20k [36]</td><td>2024</td><td>I.</td><td>MOS</td><td colspan="2">Overall</td><td>20K/420K</td></tr><tr><td>RichHF [40]</td><td>2024</td><td>I.</td><td>MOS</td><td>✓</td><td>✓</td><td>18K/54K</td></tr><tr><td>VideoFeedback [20]</td><td>2024</td><td>V.</td><td>MOS</td><td>✓</td><td>✓</td><td>37.6K/37.6K</td></tr><tr><td>T2VQA-DB [29]</td><td>2024</td><td>V.</td><td>MOS</td><td colspan="2">Overall</td><td>10K/27K</td></tr><tr><td>GenAI-Bench [34]</td><td>2024</td><td>I.&amp;V.</td><td>1-5 Likert Scale</td><td colspan="2">Overall</td><td>9.6K/9.6K</td></tr><tr><td>Q-Eval-100K (Ours)</td><td>2024</td><td>I.&amp;V.</td><td>MOS</td><td>✓</td><td>✓</td><td>100K/960K</td></tr></table>
21
+
22
+ sis, the evaluation of text-to-vision content can be primarily divided into two dimensions [42, 65, 66, 74, 77]: Visual Quality (the perceived quality of the visual content, which can be simply understood as how good it looks) and Alignment level (the consistency between text and vision, which can be interpreted as how accurate the generation is).
23
+
24
+ To meet the need for evaluation, many text-to-vision evaluation datasets have been proposed, along with corresponding evaluation algorithms [12, 13, 20, 20, 25, 29, 34-36, 40, 42, 59, 68, 72, 75, 76]. However, these efforts face the following significant limitations: 1) The key evaluation dimensions for text-to-vision content are often not systematically captured. Some datasets propose too many dimensions, adding unnecessary complexity to the evaluation process. The practical applicability of these dimensions can be narrow or lead to redundancy. 2) Most text-to-vision evaluation datasets fail to disentangle visual quality and alignment. These datasets either focus solely on alignment or visual quality, or merge both dimensions into a single score, leading to results that are often incomplete and ambiguous, making it challenging to address specific evaluation needs. 3) The scale of these datasets remains insufficient. With the rise of Large Multimodal Models (LMMs), which have demonstrated strong capabilities in visual and textual understanding, researchers are increasingly leveraging them for text-to-vision evaluation. However, current dataset sizes remain inadequate to fully unlock the potential of LMM-based models [77], conceivably restricting their applicability and generalization in real-world scenarios.
25
+
26
+ To address these challenges, we present Q-Eval-100K, which, to the best of our knowledge, is the largest text-to-vision evaluation dataset with Mean Opinion Scores (MOSs), comprising 100K instances (including 60K generated images & 40K generated videos). A brief comparison of Q-Eval-100K and previous text-to-vision eval
27
+
28
+ uation datasets is illustrated in Table 1. We manually gather prompts from existing benchmarks and create diverse prompts that focus on three key aspects: entity generation, entity attribute generation, and interaction capability. The instances in Q-Eval-100K are then generated from a diverse range of generative models, both open-source and closed-source, to ensure high diversity. We implement a rigorous, scientifically grounded subjective evaluation process using a Sample & Scrutinize strategy, focusing on both visual quality and alignment level for each of the 100K instances, yielding a total of high-quality 960K human annotations.
29
+
30
+ Building on the proposed Q-Eval-100K, we propose Q-Eval-Score, a unified evaluation framework capable of assessing both visual quality and alignment, providing separate scores for each dimension. We first adapt Q-Eval100K into a Supervised Fine-Tuning (SFT) dataset optimized for injecting knowledge into LMMs. Scores are transformed into adjective-based ratings, then reformulated within a well-guided context-prompt format. Specifically, for visual quality, we guide the model to identify positive and negative visual impacts, evaluate the intensity of these impacts, and make a balanced judgment. For alignment, we guide the model to perceive the overall situation, examine details, and reach a balanced judgment. The fine-tuning process is then supervised by a combined CE and MSE loss. During inference, the final score is computed as a weighted average based on the probability of each rating token. Notably, in handling long-prompt alignment, we observed that direct alignment assessment often yields low scores due to oversimplification. To address this, we propose a Vague-to-Specific strategy, where a long prompt is converted into a vague version retaining only core information and multiple prompts with specific details. These prompts are evaluated separately and the alignment scores are combined to the final score. Our contributions can be summarized as follows:
31
+
32
+ - We present Q-Eval-100K, the largest text-to-vision evaluation dataset with MOSs, comprising 100K instances from various generative models. We employ a scientifically grounded evaluation methodology, using a Sample & Scrutinize strategy to collect 960K human annotations focusing on visual quality and alignment.
33
+ - We propose Q-Eval-Score, a unified evaluation framework capable of independently assessing visual quality and alignment, providing separate scores for each dimension. Specifically, we adapt Q-Eval-100K into an SFT dataset with adjective-based ratings in a structured context-prompt format for enhancing the visual quality and alignment evaluation capabilities of LMMs.
34
+ - To improve alignment evaluation for long prompts, we introduce a Vague-to-Specific strategy, which separates prompts into core and detailed variants, yielding a more accurate alignment score through weighted averaging.
35
+
36
+ # 2. Related Works
37
+
38
+ # 2.1. Benchmarks for Text-to-Vision Evaluation
39
+
40
+ Early text-to-vision benchmarks largely depend on multimodal datasets labeled with captions [11, 23, 44]. However, with increasing recognition of human feedback's value [15, 24, 77], many benchmarks begin to employ human annotations. Common annotation methods include SBS (side-by-side) and MOS (mean opinion score) [50, 69]. SBS requires selecting a preferred instance from a pair, while MOS assigns a score to a single instance. SBS is generally easier for human subjects and more precise, but MOS is more versatile and broadly applicable to various situations [7, 33].
41
+
42
+ Text-to-vision evaluation dimensions [42, 74] can be categorized into visual quality and alignment. While some benchmarks [12, 13, 20, 25] treat aspects like naturalness, aesthetics, and temporal consistency as distinct dimensions, we view these as components of visual quality since they collectively influence the quality of experience (QoE) for viewers. Early benchmarks [35, 36, 59, 68] for generated images comprehensively address both visual quality and alignment for evaluation. RichHF [40] enhances these evaluations by incorporating subjective scores, heatmaps, and misalignment tokens. For video, VideoFeedback [20] introduces five dimensions of quality and alignment, while T2VQA-DB [29] focuses primarily on visual quality. Further, GenAI-Bench [34] evaluates alignment for both generated images and generated videos. The proposed Q-Eval-100K dataset offers a unified text-to-vision evaluation framework, significantly increasing dataset scale and diversity, making it distinct from prior benchmarks.
43
+
44
+ # 2.2. Metric for Text-to-Vision Evaluation
45
+
46
+ Previous evaluation methods separately focus on either visual quality or alignment. Perceptual methods assess the visual quality of generated content, utilizing traditional scores like IS [53], FID [22], and LPIPS [70] with pretrained neural networks. Recently, data-driven models [35] trained on specialized datasets have further advanced perceptual score prediction. Additionally, methods such as CLIP-IQA [58] and Q-Align [63] leverage text prompts to enhance perceptual alignment. Alignment methods, integrating both text and vision modalities, initially use CLIP-Score [21] due to its ease of application. To address more complex prompts, some approaches [28, 64, 66] incorporate human feedback to improve evaluation accuracy. Given the powerful interpretative capabilities of LMMs, recent work [14, 27, 30, 41, 71] has begun to apply these models to alignment assessments. Most existing models evaluate either perceptual quality or alignment exclusively. The proposed Q-Eval-Score addresses this gap by offering decoupled scores for both perceptual quality and alignment.
47
+
48
+ ![](images/3960671fba2f0270b4bd16208b926c01cd1d5121fcb1baa4e6bc77e325ffce0d.jpg)
49
+ Figure 2. Illustration of the Sample and Scrutinize quality control strategy for annotations in Q-Eval-100K. We randomly select a sample of 5K instances from the full dataset, which are then reviewed by experts to establish golden scores. A batch of annotations is approved only if the scores of the sampled instances show a high correlation with these expert-assigned golden scores.
50
+
51
+ # 3. Q-Eval-100K Construction
52
+
53
+ # 3.1. Basic Principles
54
+
55
+ The construction process of Q-Eval-100K is illustrated in Fig. 3. We follow these guiding principles: 1) Ensuring diversity in generated content by collecting a wide range of prompts and using multiple generative models; 2) Ensuring annotation quality through carefully designed experimental settings and standards to achieve the highest accuracy; 3) Ensuring effective learning, through adapting the data for LMM suitability by transforming both visual quality and alignment scores into a context-aware SFT dataset.
56
+
57
+ # 3.2. Sources Collection
58
+
59
+ Prompt Designing. The prompt design focuses on three main aspects: Entity Generation, Entity Attribute Generation, and Interaction Capability. 1) Entity generation targets the primary entities (people, objects, etc.) to be generated. 2) Entity attribute generation emphasizes the attributes (clothing, color, material, etc.) of the entities. 3) Interaction capability focuses on the interactions between the generated entities and other entities or the background, such as their spatial relationships and actions. Following the outlines mentioned above, we manually create a portion of the prompts and extract some from existing datasets [34, 47].
60
+
61
+ Generation Models. We utilize multiple popular text-to-image and text-to-video models to ensure diversity, which include FLUX [31], Lumina-T2X [18], PixArt [10], Stable Diffusion 3 [4], Stable Diffusion XL [51], DALL-E 3 [48], Wanx [16], Midjourney [45], Hunyuan-DiT [39], Kolors [55], ERNIE-ViLG [17], CogVideoX [67], Runway GEN-2 [19], Runway GEN-3 [52], Latte [43], Kling [54], Dreamina [9], Luma [2], PixVerse [3], Pika [32], Stable Video Diffusion [8], Vidu [56].
62
+
63
+ ![](images/5490e1b219f065fd963437a42eae5219b44087ddb6f442ea2e8a359c7ce2e833.jpg)
64
+ Figure 3. Overview of the Q-Eval-100K construction process. We design a wide range of prompts and employ various text-to-vision models to generate diverse content. Subjective evaluations are then conducted to rate the visual quality and alignment of these generated instances. The resulting scores form the SFT dataset, which can help inject corresponding knowledge into LMMs.
65
+
66
+ ![](images/9120034d587001bf210f473431001083e9b08ca109d6269e735e42ce9205d713.jpg)
67
+
68
+ ![](images/c630061e7561c52af611bed630ec6f895e7a8ad01fe835019b6050446f2e17d8.jpg)
69
+
70
+ # 3.3. Subjective Experiment
71
+
72
+ Given the large scale of Q-Eval-100K, we develop rigorous experimental protocols to ensure the highest possible accuracy in our annotations. To facilitate this, we establish a well-controlled indoor experimental environment, where more than 200 human subjects are recruited to participate in the annotation. To ensure the accuracy of annotations is not compromised by individual cognitive differences or annotator fatigue, we propose a Sample & Scrutinize data control strategy as shown in Fig. 2. The strategy includes two steps: 1) First, we randomly sample 5,000 instances from the entire dataset. We then organize experts with rich experience to discuss and score these instances, leading to the establishment of golden scores for both visual quality and alignment. These golden scores remain hidden for all subsequent experimental subjects. 2) Next, we provide human annotators with comprehensive training before they begin the annotation process. After each batch, we gather the scores for instances that have golden scores and compare them with these golden scores, calculating the correlation values (SRCC-rank similarity). Only batches with an SRCC above 0.8 are accepted, otherwise, they are rejected.
73
+
74
+ Additionally, we split Q-Eval-100K into training and testing sets in an 80:20 ratio. Each instance in the training set has at least three annotations, while each instance in the testing set has a minimum of twelve annotations to ensure accuracy. This process results in a total of over 960K annotations, calculated as follows: 80K (training instances) x 2 (visual quality & alignment) x 3 (minimum annotation number) + 20K (training instances) x 2 (visual quality & alignment) x 12 (minimum annotation number) = 960K annotations. Finally, we calculate the average of the multiple annotations to derive the score for each instance.
75
+
76
+ # 3.4. Statistical Analysis
77
+
78
+ The distributions of MOSs for visual quality and alignment are exhibited in Fig. 4 respectively, which reveal several key insights. In general, there are substantial differences among generation models in both visual quality and alignment, with their distributions displaying significant inconsistencies, indicating varied performance across different generation prompts. 1) For image alignment, distributions are generally skewed higher, suggesting that models perform relatively well in aligning images, though multiple peaks in the 4-5 and 2-3 score ranges indicate some fluctuation in performance. 2) In video alignment, model performance varies more markedly, with most scores concentrated between 2 and 4, highlighting the need for improvement in alignment in video generation. 3) Visual quality for images scores noticeably lower than image alignment, indicating that generation models perform significantly worse in visual quality. Furthermore, the wider distribution spread in image visual quality suggests greater variance and instability across models. 4) Similarly, video visual quality scores are lower than alignment scores, highlighting a consistent underperformance in visual quality. Interestingly, models such as Kling, Dreamina, Luma, PixVerse, and Pika exhibit similar distributions for alignment and visual quality, indicating consistent capability across both aspects. However, this consistency is not observed across all models.
79
+
80
+ Overall, the findings above highlight a notable disparity in that visual quality generally falls behind alignment. This gap likely stems from the current emphasis on alignment optimization, which is also relatively easier to improve, whereas visual quality has received less focus. This analysis underscores the importance of Q-Eval-100K as a comprehensive benchmark for evaluating both dimensions.
81
+
82
+ ![](images/43046933bb78c17cbca94afdd21929a358f9286733e1d6d75b90c1e034c6a58c.jpg)
83
+
84
+ ![](images/2524943a2ec883de52526a7420d6022ea3a8c43fc5a3b56e85f8c7080426a161.jpg)
85
+ (b) Video Alignment
86
+
87
+ ![](images/146aff042564aea025ddeee13830ec0c85c5797450f843c206d8853f94a52535.jpg)
88
+ (a) Image Alignment
89
+ (c) Image Visual Quality
90
+
91
+ ![](images/9807be839cd39bb383ed1a522e99c7c1f465d8e6f359b0661ce868fae252b3d6.jpg)
92
+ (d) Video Visual Quality
93
+ Figure 4. MOS distributions for the visual quality and alignment of generated images and videos in the Q-Eval-100K dataset respectively.
94
+
95
+ # 4. Q-Eval-Score
96
+
97
+ # 4.1. Unified Pipeline for Decoupled Evaluation
98
+
99
+ Although the evaluation of visual quality and alignment are two relatively independent tasks, we leverage the adaptability and extensive prior knowledge of LMMs to propose a unified model, Q-Eval-Score, that addresses both visual quality and alignment level evaluation within a single framework. Specifically, we convert the human-labeled MOS from the Q-Eval-100K dataset for both visual quality and alignment levels into a fixed-prompt format, creating a mixed SFT dataset. We then fine-tune the LMM, enabling it to evaluate both visual quality and alignment levels.
100
+
101
+ # 4.2. How to Teach LMMs to Evaluate
102
+
103
+ # 4.2.1 Context Prompt
104
+
105
+ In previous work using prompts with LMMs for evaluation, the questions are often straightforward and simple, such as 'Can you evaluate the quality of the image?' (Q-Align [63]) or 'Does this figure show [Prompt]? Please answer yes or no.' (VQAScore [41]). However, this simplicity may lead to confusion for the model, as the prompts may not be specific enough to guide a more detailed or accurate evaluation.
106
+
107
+ Inspired by the chain-of-thought (CoT [60]) concept and given that humans undergo a reasoning process when evaluating visual quality and alignment, we propose a Context-Prompt format to construct our SFT dataset. For the visual quality task, the human evaluation process can be summarized as first identifying both positive and negative quality factors, then measuring these factors, and finally weighing them to reach a conclusion. Based on this process, we design the following prompt structure:
108
+
109
+ # Context Prompt for Visual Quality
110
+
111
+ # User: Suppose you are an expert in evaluating the visual quality of AI-generated image/video. First, identify any visual distortions and positive visual appeal regarding low-level features and aesthetics. Next, assess the severity of distortions and their impact on the viewing experience, noting whether they are subtle or distracting, and evaluate how the positive features enhance the visual appeal, considering their strength and contribution to the overall aesthetics. Finally, balance the identified distortions against the positive aspects and give your rating on the visual quality. Your rating should be chosen from the following five categories: [Excellent, Good, Fair, Poor, and Bad]. For this image/video [Image/Frames], the text prompt is [Prompt]. # Answer: [Rating] (Excellent, Good, Fair, Poor, Bad).
112
+
113
+ For the alignment task, the human evaluation process involves observing whether the overall content generally aligns with the text, followed by a more detailed comparison, and finally a comprehensive evaluation for conclusion:
114
+
115
+ # Context Prompt for Visual Quality
116
+
117
+ # User: Suppose you are an expert in evaluating alignment between the text prompt and the AI-generated image/video. Begin by considering whether the overall concept of the prompt is captured in the image/video. Then, examine the specific details, such as the presence of key objects, their attributes, and relationships. Check if the visual content accurately reflects these aspects. Finally, give your alignment rating considering both overall and detailed accuracy. Your rating should be chosen from the following five categories: [Excellent, Good, Fair, Poor, and Bad]. For this image/video [Image/Frames], the text prompt is [Prompt]. # Answer: [Rating] (Excellent, Good, Fair, Poor, Bad).
118
+
119
+ ![](images/da35c470cd679706554ab8aa42273f7c4e8654c7afa3a6c09d7da5930ba6d19a.jpg)
120
+ Figure 5. The pipeline of the proposed Q-Eval-Score model involves multiple stages. First, the Q-Eval-100K SFT dataset is used to train the LMM on visual quality and alignment knowledge. Then, context prompts are applied to guide the LMM towards generating more detailed and accurate outputs. Finally, the rating token probabilities are converted into predicted scores. Additionally, long prompt alignment is achieved through a Vague-to-Specific strategy to further refine the model's responses.
121
+
122
+ # 4.2.2 'Translating' MOS into Ratings
123
+
124
+ It is well established that discrete adjective ratings are easier for LMMs to interpret compared to numerical scores [63, 73]. Since MOS in Q-Eval-100K is labeled in absolute terms, we can easily map MOS to the corresponding rating:
125
+
126
+ $$
127
+ R (s) = r _ {i} \text {i f} m + \frac {i - 1}{5} \times (M - m) < s \leq m + \frac {i}{5} \times (M - m), \tag {1}
128
+ $$
129
+
130
+ $\{r_i|_{i = 1\sim 5}\} = \{Bad,Poor,Fair,Good,Excellent\}$
131
+
132
+ where $m = 1$ and $M = 5$ (score range bound of Q-Eval-100K), $R(s)$ indicates the mapped rating of MOS value $s$ .
133
+
134
+ # 4.3. Model Architecture
135
+
136
+ Using the constructed SFT dataset with the question-answer pairs as described, we select Qwen-VL [6] as the LMM model (Qwen2-VL-7B-Instruct) for training, which has demonstrated strong visual understanding capabilities for both images and videos. For video processing, each video is converted into a sequence of images at a rate of one frame per second, which is then fed into the model as the input. The scoring computation method is detailed as follows. For the rating token, we first calculate the model output probabilities $p_j$ for each of the five rating terms {Excellent, Good, Fair, Poor, Bad}, where $j \in \{1, 2, 3, 4, 5\}$ . Then we define the final predicted rating $\hat{r}$ as the weighted average of these probabilities:
137
+
138
+ $$
139
+ \hat {r} = \sum_ {j = 1} ^ {5} p _ {j} \cdot w _ {j}, \tag {2}
140
+ $$
141
+
142
+ where $w_{j}$ is the numerical weight assigned to each rating (e.g., $w_{j} = \{1,0.75,0.5,0.25,0\}$ for Excellent to Bad).
143
+
144
+ # 4.4. Loss Function
145
+
146
+ The loss function for the model consists of two parts: Cross-Entropy (CE) Loss and Mean Squared Error (MSE) Loss. The CE Loss can assist the LMM in learning the general question-answer format and necessary knowledge. Meanwhile, the MSE Loss refines the score prediction accuracy. The CE Loss for question-answer pairs is defined as:
147
+
148
+ $$
149
+ \mathcal {L} _ {C E} = - \sum_ {i = 1} ^ {N} y _ {i} \cdot \log \left(p _ {i}\right), \tag {3}
150
+ $$
151
+
152
+ where $y_{i}$ is the one-hot encoded vector representing the true label for instance $i$ , and $p_{i}$ is the predicted probability vector for the answer tokens. The MSE Loss can then be given by:
153
+
154
+ $$
155
+ \mathcal {L} _ {M S E} = \left(\hat {r} - r _ {\mathrm {M O S}}\right) ^ {2}, \tag {4}
156
+ $$
157
+
158
+ where $\hat{r}$ and $r_{\mathrm{MOS}}$ represent the predicted scores and the MOS labels respectively. The total loss $\mathcal{L}$ is a weighted sum of the CE Loss and MSE Loss:
159
+
160
+ $$
161
+ \mathcal {L} = \alpha_ {1} \cdot \mathcal {L} _ {C E} + \beta_ {1} \cdot \mathcal {L} _ {M S E}, \tag {5}
162
+ $$
163
+
164
+ where $\alpha_{1}$ and $\beta_{1}$ (default set as $1\& 1$ ) are weight parameters controlling the contribution of each loss term.
165
+
166
+ # 4.5. Handling Long Prompt Alignment
167
+
168
+ During inference, we observe that the trained LMM tends to undervalue alignment when handling long prompts (more than 25 words). This is partly because long prompts are underrepresented in the training data, leading to insufficient training. More importantly, the LMM acts as a strict evaluator, often penalizing significant points for inconsistencies that may seem minor to humans. These small discrepancies occur more frequently with long prompts. To manage this
169
+
170
+ ![](images/70f2fa039c6bbe87d639f647285a267992e6ae3450f5f19e3f80d2f6db9aca0d.jpg)
171
+ Generated Image
172
+ Original Long Prompt
173
+ Figure 6. An example of the Vague-to-Specific strategy. The original long prompt is divided by the LLM (QwenLM [5]) into a Vague Prompt and several Specific Prompts. The alignment score is first calculated separately for each part, then combined using weighted averaging to form the final score.
174
+
175
+ A new Chinese style painting, a group of animals are having a meeting, the owl is sitting in the leader's position and speaking, they are all wearing clothes with futuristic designs, and the picture should have a sense of technology.
176
+
177
+ <table><tr><td>Vague Prompt</td><td colspan="2">In a futuristic Chinese-style painting, an owl leads a tech-themed animal meeting. 4 A_v=4</td></tr><tr><td rowspan="2">Specific Prompts</td><td colspan="2">-A group of animals are depicted in a meeting. 5</td></tr><tr><td>-The owl is the leader, sitting in the center, speaking. 5</td><td>1/n ∑i=1n As_i = 13/3</td></tr><tr><td>Final Align. 0.5*4+0.5*4.33 = 4.17</td><td>-Each animal wears futuristic clothing, adding a tech flair to the scene.</td><td>3</td></tr></table>
178
+
179
+ issue, we propose a Vague-to-Specific strategy. We use an additional LLM (QwenLM [5]) to summarize long prompts, retaining only the core features while filtering out details, producing a concise Vague Prompt. Then, we split the long prompt into Specific Prompts (no more than three), each maintaining full details but avoiding redundancy:
180
+
181
+ $$
182
+ \left(P _ {v}, \left\{P _ {s _ {1}}, \dots , P _ {s _ {n}} \right\}\right) = \mathcal {V S} \left(P _ {L o n g}\right), \tag {6}
183
+ $$
184
+
185
+ where $P_{v}$ represents the Vague Prompt, $\{P_{S_1},\dots ,P_{S_n}\}$ stands for the set of Specific Prompts, $\mathcal{VS}(\cdot)$ indicates the prompt split function, and $P_{Long}$ is the original long prompt. For the Vague Prompt, we calculate alignment in the usual way. However, directly asking for consistency with the Specific Prompts is not appropriate since each one addresses only part of the vision content. Drawing inspiration from the VQAScore [41] approach, we modify the question to a softer format, such as 'Does the image/video show [Prompt]?' to evaluate alignment (measuring as the logit probability of answering 'Yes') for each Specific Prompts. Finally, we combine the results from both the Vague Prompt and Specific Prompt using a weighted approach to calculate the final alignment score:
186
+
187
+ $$
188
+ \mathcal {A} _ {f} = \alpha_ {2} \mathcal {A} _ {v} + \beta_ {2} \left(\frac {1}{n} \sum_ {i = 1} ^ {n} \mathcal {A} _ {s _ {i}}\right), \tag {7}
189
+ $$
190
+
191
+ where $\mathcal{A}_f$ , $\mathcal{A}_v$ , and $\mathcal{A}_{s_i}$ are the alignment scores for the final evaluation, vague prompt, and $i$ -th specific prompt. $\alpha_{2}$ and $\beta_{2}$ (0.5 & 0.5 as default) are weight parameters.
192
+
193
+ Table 2. Performance comparison on the visual quality aspect of Q-Eval-100K. Best in bold, second underlined.
194
+
195
+ <table><tr><td rowspan="3">Model
196
+ (Visual
197
+ Quality)</td><td colspan="4">Q-Eval-Image</td><td colspan="4">Q-Eval-Video</td></tr><tr><td colspan="2">Instance-level</td><td colspan="2">Model-level</td><td colspan="2">Instance-level</td><td colspan="2">Model-level</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>NIQE</td><td>0.239</td><td>0.238</td><td>0.829</td><td>0.758</td><td>-0.057</td><td>-0.074</td><td>-0.333</td><td>-0.241</td></tr><tr><td>CLIP-IQA</td><td>0.334</td><td>0.324</td><td>0.600</td><td>0.685</td><td>0.194</td><td>0.175</td><td>0.095</td><td>0.076</td></tr><tr><td>Q-Align</td><td>0.587</td><td>0.578</td><td>0.714</td><td>0.914</td><td>0.500</td><td>0.502</td><td>0.762</td><td>0.762</td></tr><tr><td>IPCE</td><td>0.550</td><td>0.560</td><td>0.933</td><td>0.937</td><td>0.299</td><td>0.302</td><td>0.476</td><td>0.568</td></tr><tr><td>Q-Eval-Score</td><td>0.732</td><td>0.731</td><td>0.943</td><td>0.949</td><td>0.601</td><td>0.609</td><td>0.762</td><td>0.814</td></tr></table>
198
+
199
+ Table 3. Performance comparison on the alignment aspect of Q-Eval-100K. Considering that CLIPScore, BLIP2Score, and VQAScore are popular zero-shot alignment evaluation metrics, we provide the corresponding performance with the official default weight as well (marked with *).
200
+
201
+ <table><tr><td rowspan="3">Model
202
+ (Align-
203
+ ment)</td><td colspan="4">Q-Eval-Image</td><td colspan="4">Q-Eval-Video</td></tr><tr><td colspan="2">Instance-level</td><td colspan="2">Model-level</td><td colspan="2">Instance-level</td><td colspan="2">Model-level</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>CLIPScore*</td><td>0.245</td><td>0.252</td><td>0.617</td><td>0.685</td><td>0.186</td><td>0.219</td><td>0.518</td><td>0.500</td></tr><tr><td>BLIP2Score*</td><td>0.297</td><td>0.330</td><td>0.764</td><td>0.835</td><td>0.218</td><td>0.250</td><td>0.295</td><td>0.296</td></tr><tr><td>VQAScore*</td><td>0.549</td><td>0.468</td><td>0.385</td><td>0.555</td><td>0.433</td><td>0.432</td><td>0.433</td><td>0.351</td></tr><tr><td>CLIPScore</td><td>0.768</td><td>0.740</td><td>0.958</td><td>0.956</td><td>0.431</td><td>0.443</td><td>0.519</td><td>0.509</td></tr><tr><td>BLIP2Score</td><td>0.766</td><td>0.743</td><td>0.933</td><td>0.934</td><td>0.483</td><td>0.488</td><td>0.512</td><td>0.481</td></tr><tr><td>ImageReward</td><td>0.762</td><td>0.732</td><td>0.925</td><td>0.955</td><td>0.472</td><td>0.485</td><td>0.375</td><td>0.362</td></tr><tr><td>Q-Eval-Score</td><td>0.822</td><td>0.802</td><td>0.964</td><td>0.969</td><td>0.607</td><td>0.634</td><td>0.648</td><td>0.605</td></tr></table>
204
+
205
+ # 5. Experiment
206
+
207
+ # 5.1. Experimental Setup
208
+
209
+ Training & Evaluation. The Qwen2-VL-7B-Instruct [6] serves as the backbone LMM for Q-Eval-Score. All visual quality and alignment data from images and videos are combined for training. Training is conducted on 8 A100 GPUs for one epoch by default. For evaluation metrics, we use SRCC and PLCC, which measure the rank and linear correlation between predicted scores and MOSs. We propose evaluations at the Instance-level and Model-level which assess accuracy in ranking specific generated instances and generative models based on overall performance.
210
+
211
+ Competitors. Few models can simultaneously predict both visual quality and alignment. Thus we selected task-specific competitors for each sub-task: For visual quality, we include NIQE [46], CLIP-IQA [58], Q-Align [63], and IPCE [49] (the top method from the 'NTIRE 2024 Quality Assessment of AI-Generated Content Challenge' [42]). For alignment, we choose CLIPScore [21], BLIP2Score [38], ImageReward [66] and VQAScore [41] as the competitors. All models are trained and tested using their default recommended parameters and the corresponding train-test sets of the Q-Eval-100K dataset unless specified.
212
+
213
+ # 5.2. Discussion & General Findings
214
+
215
+ The general performance on the visual quality and alignment is exhibited in Table 2 and Table 3, from which we can draw several conclusions: 1) For visual quality, The proposed Q-Eval-Score outperforms all competitors, achiev-
216
+
217
+ Table 4. Ablation Study of Q-Eval-Score.
218
+
219
+ <table><tr><td rowspan="3">Model</td><td colspan="4">Q-Eval-Image (Quality)</td><td colspan="4">Q-Eval-Video (Quality)</td><td colspan="4">Q-Eval-Image (Alignment)</td><td colspan="4">Q-Eval-Video (Alignment)</td></tr><tr><td colspan="2">Instance-level</td><td colspan="2">Model-level</td><td colspan="2">Instance-level</td><td colspan="2">Model-level</td><td colspan="2">Instance-level</td><td colspan="2">Model-level</td><td colspan="2">Instance-level</td><td colspan="2">Model-level</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>w/o SFT Training</td><td>0.071</td><td>0.096</td><td>0.257</td><td>0.136</td><td>0.018</td><td>0.008</td><td>0.262</td><td>0.314</td><td>0.529</td><td>0.423</td><td>0.560</td><td>0.705</td><td>0.464</td><td>0.437</td><td>0.567</td><td>0.478</td></tr><tr><td>w/o Context Prompt</td><td>0.504</td><td>0.509</td><td>0.600</td><td>0.756</td><td>0.598</td><td>0.591</td><td>0.571</td><td>0.638</td><td>0.805</td><td>0.776</td><td>0.960</td><td>0.963</td><td>0.588</td><td>0.597</td><td>0.601</td><td>0.602</td></tr><tr><td>w/o CE Loss</td><td>0.652</td><td>0.622</td><td>0.932</td><td>0.910</td><td>0.247</td><td>0.249</td><td>0.071</td><td>0.239</td><td>0.804</td><td>0.776</td><td>0.948</td><td>0.961</td><td>0.604</td><td>0.626</td><td>0.642</td><td>0.593</td></tr><tr><td>w/o MSE 
Loss</td><td>0.665</td><td>0.673</td><td>0.933</td><td>0.941</td><td>0.595</td><td>0.583</td><td>0.690</td><td>0.712</td><td>0.795</td><td>0.763</td><td>0.954</td><td>0.958</td><td>0.580</td><td>0.605</td><td>0.624</td><td>0.599</td></tr><tr><td>Q-Eval-Score</td><td>0.732</td><td>0.731</td><td>0.943</td><td>0.949</td><td>0.601</td><td>0.609</td><td>0.762</td><td>0.814</td><td>0.822</td><td>0.802</td><td>0.964</td><td>0.969</td><td>0.607</td><td>0.634</td><td>0.648</td><td>0.605</td></tr></table>
220
+
221
+ Table 5. Performance comparison on the alignment aspect of Q-Eval-100K on the long prompt subset, where w/o V2S and w V2S represent the proposed Q-Eval-Score model without and with the Vague-to-Specific strategy respectively.
222
+
223
+ <table><tr><td rowspan="3">Model
224
+ (Align-
225
+ ment)</td><td colspan="2">Q-Eval-Image (Long)</td><td colspan="2">Q-Eval-Video (Long)</td></tr><tr><td colspan="2">Instance-level</td><td colspan="2">Instance-level</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>CLIPScore</td><td>0.533</td><td>0.547</td><td>0.359</td><td>0.367</td></tr><tr><td>BLIP2Score</td><td>0.620</td><td>0.636</td><td>0.392</td><td>0.395</td></tr><tr><td>VQAScore</td><td>0.432</td><td>0.325</td><td>0.344</td><td>0.350</td></tr><tr><td>w/o V2S</td><td>0.591</td><td>0.599</td><td>0.480</td><td>0.470</td></tr><tr><td>w V2S</td><td>0.620</td><td>0.623</td><td>0.517</td><td>0.512</td></tr></table>
226
+
227
+ Table 6. Cross-dataset validation performance on GenAI-Bench. The Q-Eval-Score is trained on the Q-Eval-100K and then validated on GenAI-Bench. * indicates using default weight.
228
+
229
+ <table><tr><td rowspan="2">Model
230
+ (Alignment)</td><td colspan="2">GenAI-Bench (Image)</td><td colspan="2">GenAI-Bench (Video)</td></tr><tr><td>SRCC</td><td>PLCC</td><td>SRCC</td><td>PLCC</td></tr><tr><td>CLIPScore*</td><td>0.174</td><td>0.169</td><td>0.269</td><td>0.269</td></tr><tr><td>BLIP2Score*</td><td>0.221</td><td>0.209</td><td>0.289</td><td>0.275</td></tr><tr><td>VQAScore*</td><td>0.556</td><td>0.502</td><td>0.527</td><td>0.505</td></tr><tr><td>CLIPScore</td><td>0.681</td><td>0.670</td><td>0.610</td><td>0.628</td></tr><tr><td>BLIP2Score</td><td>0.687</td><td>0.679</td><td>0.679</td><td>0.705</td></tr><tr><td>ImageReward</td><td>0.664</td><td>0.656</td><td>0.663</td><td>0.684</td></tr><tr><td>Q-Eval-Score</td><td>0.757</td><td>0.747</td><td>0.717</td><td>0.714</td></tr></table>
231
+
232
+ ing the best performance overall. The decline in video performance is likely due to the 1fps frame sampling method, which causes a loss of temporal information, leading to inaccurate estimations. Despite this, at the instance-level, Q-Eval-Score still leads the second-best competitor (Q-Align) by $10\%$ on video instance-level SRCC. 2) For alignment, Q-Eval-Score also demonstrates a significant lead in alignment, outperforming competitors by $6\%$ in image instance-level SRCC and $12\%$ in video instance-level SRCC. Additionally, the substantial performance improvements seen in the trained competitors suggest that Q-Eval-100K provides valuable knowledge for alignment evaluation. 3) In comparison to alignment, Q-Eval-Score's performance in visual quality is notably lower, indicating that predicting visual quality is more challenging. This is likely because alignment evaluation is more straightforward and objective, while visual quality perception is more complex and subjective, making it harder to assess. Overall, the proposed Q-Eval-Score exhibits exceptional potential in both visual quality and alignment, achieving over 0.94 performance at the image model-level, closely aligning with human evaluations. This strong performance not only highlights the robustness of the model but also underscores its promising ability to serve as an effective evaluation metric.
233
+
234
+ # 5.3. Further Experiments
235
+
236
+ I) Ablation Study. We conduct a detailed ablation study to assess the contribution of proposed strategies and CE/MSE loss. The results are presented in Table 4. It is clear that each of the strategies we proposed and both CE/MSE loss make a significant contribution to the final outcome.
237
+
238
+ II) Long Prompt. To test the Vague-to-Specific strategy for long prompt alignment, we select a subset of 5,000 instances from Q-Eval-100K that contain long prompts (over 25 words) for testing, with performance shown in Table 5. Due to the limited data size, we present only the instance-level performance. The results clearly show that the Vague-to-Specific strategy significantly improves performance, indicating the effectiveness of handling long prompt alignment.
239
+
240
+ III) Cross-dataset Validation. To demonstrate the value of the Q-Eval-100K dataset, we conduct a cross-dataset validation, with performance results shown in Table 6. It is important to note that instances generated from prompts in GenAI-Bench are excluded from this validation. The results clearly show that models trained on Q-Eval-100K significantly outperform the current state-of-the-art VQAScore on GenAI-Bench by a large margin, providing strong evidence of the generalization value of the Q-Eval-100K dataset.
241
+
242
+ # 6. Conclusion
243
+
244
+ In conclusion, we introduce Q-Eval-100K, the largest text-to-vision evaluation dataset to date, featuring 100K instances and 960K human annotations for assessing visual quality and alignment. We also present Q-Eval-Score, a unified evaluation framework that leverages this dataset to provide separate scores for each dimension. Experimental results show that Q-Eval-Score outperforms existing methods, demonstrating its potential for more reliable, comprehensive assessments of text-to-vision models. Looking ahead, we hope this work can lay a strong foundation for further advancements in text-to-vision model promotion and real-world evaluation applications.
245
+
246
+ # Acknowledgment
247
+
248
+ The work was supported in part by the National Natural Science Foundation of China under Grant 62301310, Grant 623B2073, and in part by Sichuan Science and Technology Program under Grant 2024NSFSC1426.
249
+
250
+ # References
251
+
252
+ [1] Recommendation 500-10: Methodology for the subjective assessment of the quality of television pictures. ITU-R Rec. BT.500, 2000. 3
253
+ [2] Luma AI. Dream machine: Ai video generator. https://lumalabs.ai/dream-machine, 2024. Accessed: 2024-11-03. 3
254
+ [3] PixVerse AI. Pixverse: Ai video creation platform. https://pixverse.ai/, 2024. Accessed: 2024-11-03. 3
255
+ [4] Stability AI. Stable diffusion 3.0. https://stability.ai, 2023. Available through the Stability AI Developer Platform. 3
256
+ [5] Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou, and Tianhang Zhu. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 7
257
+ [6] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 2023. 6, 7, 1
258
+ [7] Shane Barratt and Rishi Sharma. A note on the inception score. arXiv preprint arXiv:1801.01973, 2018. 3
259
+ [8] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 3
260
+ [9] Dreamina by CapCut. Dreamina, 2023. Available online at https://dreamina.capcut.com/. 3
261
+ [10] Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. arXiv preprint arXiv:2310.00426, 2023. 3
262
+ [11] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO captions: Data collection and evaluation server. CoRR, abs/1504.00325, 2015. 3
263
+ [12] Zijian Chen, Wei Sun, Haoning Wu, Zicheng Zhang, Jun Jia, Xiongkuo Min, Guangtao Zhai, and Wenjun Zhang. Exploring the naturalness of ai-generated images. arXiv preprint arXiv:2312.05476, 2023. 2, 3
264
+ [13] Zijian Chen, Wei Sun, Yuan Tian, Jun Jia, Zicheng Zhang, Jiarui Wang, Ru Huang, Xiongkuo Min, Guangtao Zhai, and Wenjun Zhang. Gaia: Rethinking action quality assessment for ai-generated videos. arXiv preprint arXiv:2406.06087, 2024. 2, 3
265
+
266
+ [14] Jaemin Cho, Yushi Hu, Roopal Garg, Peter Anderson, Ranjay Krishna, Jason Baldridge, Mohit Bansal, Jordi Pont-Tuset, and Su Wang. Davidsonian scene graph: Improving reliability in fine-grained evaluation for text-image generation. arXiv preprint arXiv:2310.18235, 2023. 1, 3
267
+ [15] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017. 3
268
+ [16] Alibaba Cloud. Wanx. https://tongyi.aliyun.com/wanxiang/, 2024. Accessed: 2024-11-07. 3
269
+ [17] Z. Feng, Z. Zhang, X. Yu, Y. Fang, L. Li, X. Chen, and H. Wang. Ernie-vilg 2.0: Improving text-to-image diffusion model with knowledge-enhanced mixture-of-denoising-experts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10135-10145, 2023. 3
270
+ [18] Peng Gao, Le Zhuo, Chris Liu, , Ruoyi Du, Xu Luo, Longtian Qiu, Yuhang Zhang, et al. Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers. arXiv preprint arXiv:2405.05945, 2024. 3
271
+ [19] Anastasis Germanidis. Gen-2: Generate novel videos with text, images or video clips. https://runwayml.com/research/gen-2, 2023. Accessed: 2024-11-07. 3
272
+ [20] Xuan He, Dongfu Jiang, Ge Zhang, Max Ku, Achint Soni, Sherman Siu, Haonan Chen, Abhranil Chandra, Ziyan Jiang, Aaran Arulraj, Kai Wang, Quy Duc Do, Yuansheng Ni, Bohan Lyu, Yaswanth Narsupalli, Rongqi Fan, Zhiheng Lyu, Yuchen Lin, and Wenhu Chen. Videoscore: Building automatic metrics to simulate fine-grained human feedback for video generation. ArXiv, abs/2406.15252, 2024. 2, 3
273
+ [21] Jack Hessel, Ari Holtzman, Maxwell Forbes, Ronan Le Bras, and Yejin Choi. Clipscore: A reference-free evaluation metric for image captioning. arXiv preprint arXiv:2104.08718, 2021. 3, 7
274
+ [22] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in neural information processing systems, pages 6626-6637, 2017. 3
275
+ [23] Yushi Hu, Benlin Liu, Jungo Kasai, Yizhong Wang, Mari Ostendorf, Ranjay Krishna, and Noah A Smith. Tifa: Accurate and interpretable text-to-image faithfulness evaluation with question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20406-20417, 2023. 3
276
+ [24] Kaiyi Huang, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2i-compbench: A comprehensive benchmark for open-world compositional text-to-image generation. Advances in Neural Information Processing Systems, 36:78723-78747, 2023. 3
277
+ [25] Ziqi Huang, Yinan He, Jiashuo Yu, Fan Zhang, Chenyang Si, Yuming Jiang, Yuanhan Zhang, Tianxing Wu, Qingyang Jin, Nattapol Chanpaisit, et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21807-21818, 2024. 2, 3
278
+
279
+ [26] Everypixel Journal. Ai image statistics for 2024: How much content was created by ai. *Everypixel*, 2024. Accessed: 2024-10-14. 1
280
+ [27] Seungone Kim, Jamin Shin, Yejin Cho, Joel Jang, Shayne Longpre, Hwaran Lee, Sangdoo Yun, Seongjin Shin, Sungdong Kim, James Thorne, et al. Prometheus: Inducing fine-grained evaluation capability in language models. arXiv preprint arXiv:2310.08491, 2023. 1, 3
281
+ [28] Yuval Kirstain, Adam Polyak, Uriel Singer, Shahbuland Martiana, Joe Penna, and Omer Levy. Pick-a-pic: An open dataset of user preferences for text-to-image generation. Advances in Neural Information Processing Systems, 36: 36652-36663, 2023. 1, 2, 3
282
+ [29] Tengchuan Kou, Xiaohong Liu, Zicheng Zhang, Chunyi Li, Haoning Wu, Xiongkuo Min, Guangtao Zhai, and Ning Liu. Subjective-aligned dateset and metric for text-to-video quality assessment. arXiv preprint arXiv:2403.11956, 2024. 2, 3
283
+ [30] Max Ku, Dongfu Jiang, Cong Wei, Xiang Yue, and Wenhu Chen. Viescore: Towards explainable metrics for conditional image synthesis evaluation. arXiv preprint arXiv:2312.14867, 2023. 1, 3
284
+ [31] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. Accessed: 2024-10-22. 3
285
+ [32] Pika Labs. Pika: Ai video generation platform. https://pika.art/, 2024. Accessed: 2024-11-03. 3
286
+ [33] Lucie Lévêque, Meriem Outtas, Hantao Liu, and Lu Zhang. Comparative study of the methodologies used for subjective medical image quality assessment. Physics in Medicine & Biology, 66(15):15TR02, 2021. 3
287
+ [34] Baiqi Li, Zhiqiu Lin, Deepak Pathak, Jiayao Li, Yixin Fei, Kewen Wu, Tiffany Ling, Xide Xia, Pengchuan Zhang, Graham Neubig, et al. Genai-bench: Evaluating and improving compositional text-to-visual generation. arXiv preprint arXiv:2406.13743, 2024. 2, 3, 1
288
+ [35] Chunyi Li, Zicheng Zhang, Haoning Wu, Wei Sun, Xiongkuo Min, Xiaohong Liu, Guangtao Zhai, and Weisi Lin. AGIQA-3K: An open database for ai-generated image quality assessment. CoRR, 2306.04717, 2023. 1, 2, 3
289
+ [36] Chunyi Li, Tengchuan Kou, Yixuan Gao, Yuqin Cao, Wei Sun, Zicheng Zhang, Yingjie Zhou, Zhichao Zhang, Weixia Zhang, Haoning Wu, et al. Aigiqa-20k: A large database for ai-generated image quality assessment. arXiv preprint arXiv:2404.03407, 2(3):5, 2024. 2, 3
290
+ [37] Chunyi Li, Haoning Wu, Zicheng Zhang, Hongkun Hao, Kaiwei Zhang, Lei Bai, Xiaohong Liu, Xiongkuo Min, Weisi Lin, and Guangtao Zhai. Q-refine: A perceptual quality refiner for ai-generated image, 2024. 1
291
+ [38] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730–19742. PMLR, 2023. 7
292
+ [39] Zhimin Li, Jianwei Zhang, Qin Lin, Jiangfeng Xiong, Yanxin Long, Xinchi Deng, Yingfang Zhang, Xingchao Liu, Minbin Huang, Zedong Xiao, et al. Hunyuan-dit: A powerful
293
+
294
+ multi-resolution diffusion transformer with fine-grained chinese understanding. arXiv preprint arXiv:2405.08748, 2024. 3
295
+ [40] Youwei Liang, Junfeng He, Gang Li, Peizhao Li, Arseniy Klimovskiy, Nicholas Carolan, Jiao Sun, Jordi Pont-Tuset, Sarah Young, Feng Yang, et al. Rich human feedback for text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19401-19411, 2024. 2, 3
296
+ [41] Zhiqiu Lin, Deepak Pathak, Baiqi Li, Jiayao Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. Evaluating text-to-visual generation with image-to-text generation. arXiv preprint arXiv:2404.01291, 2024. 1, 3, 5, 7
297
+ [42] Xiaohong Liu, Xiongkuo Min, Guangtao Zhai, Chunyi Li, Tengchuan Kou, Wei Sun, Haoning Wu, Yixuan Gao, Yuqin Cao, Zicheng Zhang, et al. Ntire 2024 quality assessment of ai-generated content challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6337-6362, 2024. 2, 3, 7
298
+ [43] Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024. 3
299
+ [44] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 3195-3204, 2019. 3
300
+ [45] MidJourney. Midjourney. https://www.midjourney.com, 2023. Accessed: 2024-10-22. 3
301
+ [46] Anish Mittal, Rajiv Soundararajan, and Alan C. Bovik. Making a "completely blind" image quality analyzer. IEEE Signal Processing Letters, 20(3):209-212, 2013. 7
302
+ [47] Yasumasa Onoe, Sunayana Rane, Zachary Berger, Yonatan Bitton, Jaemin Cho, Roopal Garg, Alexander Ku, Zarana Parekh, Jordi Pont-Tuset, Garrett Tanzer, et al. Docci: Descriptions of connected and contrasting images. arXiv preprint arXiv:2404.19753, 2024. 3, 1
303
+ [48] OpenAI. Dall-e 3. https://openai.com/dall-e-3, 2023. Accessed: 2024-10-22. 3
304
+ [49] Fei Peng, Huiyuan Fu, Anlong Ming, Chuanming Wang, Huadong Ma, Shuai He, Zifei Dou, and Shu Chen. Aigc image quality assessment via image-prompt correspondence. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 6432-6441, 2024. 7
305
+ [50] Maria Perez-Ortiz, Aliaksei Mikhailiuk, Emin Zerman, Vedad Hulusic, Giuseppe Valenzise, and Rafal K Mantiuk. From pairwise comparisons and rating to a unified quality scale. IEEE Transactions on Image Processing, 29:1139–1151, 2019. 3
306
+ [51] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 3
307
+
308
+ [52] Runway. Introducing gen-3 alpha: A new frontier for video generation. https://runwayml.com/research/introducing-gen-3-alpha, 2024. Accessed: 2024-11-07. 3
309
+ [53] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, Xi Chen, and Xi Chen. Improved techniques for training gans. In Advances in neural information processing systems, pages 2234-2242, 2016. 3
310
+ [54] Kuaishou Team. Kling ai. https://klingai.io/, 2024. 3
311
+ [55] Kolors Team. Kolors: Effective training of diffusion model for photorealistic text-to-image synthesis. arXiv preprint, 2024. 3
312
+ [56] Vidu AI Team. Vidu ai. https://www.vidu.studio/zh, 2024. Accessed: 2024-11-03. 3
313
+ [57] Techreport. Ai image generator market statistics in 2024. Techreport, 2024. Accessed: 2024-10-14. 1
314
+ [58] Jianyi Wang, Kelvin C. K. Chan, and Chen Change Loy. Exploring clip for assessing the look and feel of images, 2022. 3, 7
315
+ [59] Jiarui Wang, Huiyu Duan, Jing Liu, Shi Chen, Xiongkuo Min, and Guangtao Zhai. Aigciqa2023: A large-scale image quality assessment database for ai generated images: from the perspectives of quality, authenticity and correspondence. In CAAI International Conference on Artificial Intelligence, pages 46-57. Springer, 2023. 2, 3
316
+ [60] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022. 5
317
+ [61] Haoning Wu, Zicheng Zhang, Erli Zhang, Chaofeng Chen, Liang Liao, Annan Wang, Chunyi Li, Wenxiu Sun, Qiong Yan, Guangtao Zhai, and Weisi Lin. Q-bench: A benchmark for general-purpose foundation models on low-level vision. In ICLR, pages 1-13, 2023. 1
318
+ [62] Haoning Wu, Zicheng Zhang, Erli Zhang, Chaofeng Chen, Liang Liao, Annan Wang, Kaixin Xu, Chunyi Li, Jingwen Hou, Guangtao Zhai, et al. Q-Instruct: Improving low-level visual abilities for multi-modality foundation models. IEEE CVPR, pages 1-16, 2024.
319
+ [63] Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Liang Liao, Chunyi Li, Yixuan Gao, Annan Wang, Erli Zhang, Wenxiu Sun, Qiong Yan, Xiongkuo Min, Guangtao Zhai, and Weisi Lin. Q-align: Teaching lmms for visual scoring via discrete text-defined levels. In ICML2024, 2024. 3, 5, 6, 7
320
+ [64] Xiaoshi Wu, Yiming Hao, Keqiang Sun, Yixiong Chen, Feng Zhu, Rui Zhao, and Hongsheng Li. Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis. arXiv preprint arXiv:2306.09341, 2023. 1, 2, 3
321
+ [65] Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, and Hongsheng Li. Better aligning text-to-image models with human preference. CoRR, abs/2303.14420, 2023. 2
322
+ [66] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. ImageRe
323
+
324
+ ward: Learning and evaluating human preferences for text-to-image generation. CoRR, abs/2304.05977, 2023. 1, 2, 3, 7
325
+ [67] Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024. 3
326
+ [68] Jiquan Yuan, Fanyi Yang, Jihe Li, Xinyan Cao, Jinming Che, Jinlong Lin, and Xixin Cao. Pku-aigiqa-4k: A perceptual quality assessment database for both text-to-image and image-to-image ai-generated images. arXiv preprint arXiv:2404.18409, 2024. 2, 3
327
+ [69] Emin Zerman, Vedad Hulusic, Giuseppe Valenzise, Rafal K Mantiuk, and Frédéric Dufaux. The relation between mos and pairwise comparisons and the importance of cross-content comparisons. *Electronic Imaging*, 30:1-6, 2018. 3
328
+ [70] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE CVPR, pages 586-595, 2018. 3
329
+ [71] Xinlu Zhang, Yujie Lu, Weizhi Wang, An Yan, Jun Yan, Lianke Qin, Heng Wang, Xifeng Yan, William Yang Wang, and Linda Ruth Petzold. Gpt-4v (ision) as a generalist evaluator for vision-language tasks. arXiv preprint arXiv:2311.01361, 2023. 1, 3
330
+ [72] Zicheng Zhang, Ziheng Jia, Haoning Wu, Chunyi Li, Zijian Chen, Yingjie Zhou, Wei Sun, Xiaohong Liu, Xiongkuo Min, Weisi Lin, et al. Q-bench-video: Benchmarking the video quality understanding of lmms. arXiv preprint arXiv:2409.20063, 2024. 2
331
+ [73] Zicheng Zhang, Haoning Wu, Zhongpeng Ji, Chunyi Li, Erli Zhang, Wei Sun, Xiaohong Liu, Xiongkuo Min, Fengyu Sun, Shangling Jui, et al. Q-boost: On visual quality assessment ability of low-level multi-modality foundation models. In 2024 IEEE International Conference on Multimedia and Expo Workshops (ICMEW), pages 1-6. IEEE, 2024. 1, 6
332
+ [74] Zicheng Zhang, Haoning Wu, Chunyi Li, Yingjie Zhou, Wei Sun, Xiongkuo Min, Zijian Chen, Xiaohong Liu, Weisi Lin, and Guangtao Zhai. A-bench: Are lmms masters at evaluating ai-generated images? arXiv preprint arXiv:2406.03070, 2024. 2, 3
333
+ [75] Zicheng Zhang, Haoning Wu, Erli Zhang, Guangtao Zhai, and Weisi Lin. Q-bench+: A benchmark for multi-modal foundation models on low-level vision from single images to pairs. arXiv preprint arXiv:2402.07116, 2024. 2
334
+ [76] Zicheng Zhang, Haoning Wu, Yingjie Zhou, Chunyi Li, Wei Sun, Chaofeng Chen, Xiongkuo Min, Xiaohong Liu, Weisi Lin, and Guangtao Zhai. Lmm-pcqa: Assisting point cloud quality assessment with lmm. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 7783–7792, 2024. 2
335
+ [77] Zicheng Zhang, Yingjie Zhou, Chunyi Li, Baixuan Zhao, Xiaohong Liu, and Guangtao Zhai. Quality assessment in the era of large models: A survey. arXiv preprint arXiv:2409.00031, 2024. 2, 3
336
+
337
+ # Q-Eval-100K: Evaluating Visual Quality and Alignment Level for Text-to-Vision Content
338
+
339
+ Supplementary Material
340
+
341
+ # 7. Dataset Construction Details
342
+
343
+ In this section, we mainly talk about the details of prompts collection.
344
+
345
+ # 7.1. Prompts Collection
346
+
347
+ The prompt collection comprises two sources:
348
+
349
+ - Internally Constructed Prompts, which is based on internal capability requirement of Q-Eval-100K.
350
+ - Open-Source Prompts, which is based on other text-to-vision alignment evaluation datasets, including GenAIbench [34] and Docci [47]. GenAIbench features comprehensive prompt designs, while Docci provides longer prompts, making it suitable for evaluating long-prompt descriptions.
351
+
352
+ # 7.2. Internally Constructed Prompts
353
+
354
+ - Manual Construction: Data is manually created by searching for commonly used prompts and rewriting them to align with the distribution of specific capabilities.
355
+ - GPT-4 Augmentation: GPT-4 is used to expand the dataset for specific capabilities. This process involves leveraging a few manually constructed examples and applying a Chain-of-Thought (CoT) approach. GPT-4 generates prompts based on given definitions and examples, which are then filtered and refined manually.
356
+
357
+ # Example GPT-4 Prompt Generation Instruction:
358
+
359
+ You are an expert at crafting text-to-image prompts. I need prompts for text-to-image models based on the following category labels. Each label is explained with a description, the text before the ';' is the label, and the text after the ';' provides details. Use your imagination and creativity to generate relevant English prompts. The prompt length should be between [len_min] and [len_max]. Avoid extra content, only output prompts.
360
+
361
+ # 7.3. Prompt Designing
362
+
363
+ As shown in Table 7, the prompt design focuses on three main aspects: Entity Generation, Entity Attribute Generation, and Interaction Capability. 1) Entity generation targets the primary entities (people, objects, etc.) to be generated. 2) Entity attribute generation emphasizes the attributes (clothing, color, material, visual quality, etc.) of the entities. 3) Interaction capability focuses on the interactions between the generated entities and other entities or the background, such as their spatial relationships and actions.
364
+
365
+ Table 7. Detailed Descriptions of Entity Generation, Entity Attribute Generation, and Interaction Ability
366
+
367
+ <table><tr><td>Category</td><td>Subcategory</td><td>Count</td></tr><tr><td rowspan="8">Entity Generation</td><td>Simple Entity Generation</td><td>1439</td></tr><tr><td>Simple Human Generation</td><td>/</td></tr><tr><td>Simple Object Generation</td><td>/</td></tr><tr><td>Other Simple Entity Generation</td><td>/</td></tr><tr><td>Complex Entity Generation</td><td>1729</td></tr><tr><td>Character Information Generation</td><td>/</td></tr><tr><td>Text and Symbol Generation</td><td>/</td></tr><tr><td>Chart Generation</td><td>/</td></tr><tr><td rowspan="32">Entity Attribute Generation</td><td>Basic Entity Attributes</td><td>1656</td></tr><tr><td>Entity Shape Generation</td><td>/</td></tr><tr><td>Entity Position Generation</td><td>/</td></tr><tr><td>Entity Color Generation</td><td>/</td></tr><tr><td>Entity State Generation</td><td>/</td></tr><tr><td>Other Entity Attributes Generation</td><td>/</td></tr><tr><td>Person and Animal Attributes Generation</td><td>1500</td></tr><tr><td>Emotion Generation</td><td>/</td></tr><tr><td>Action Generation</td><td>/</td></tr><tr><td>Specific Age Person Generation</td><td>/</td></tr><tr><td>Specific Gender Person Generation</td><td>/</td></tr><tr><td>Other Person and Animal Attributes</td><td>/</td></tr><tr><td>Portrait Generation</td><td>531</td></tr><tr><td>Simple Portrait Generation</td><td>/</td></tr><tr><td>Complex Portrait Generation</td><td>/</td></tr><tr><td>Scene and Theme Generation</td><td>2450</td></tr><tr><td>Theme Generation</td><td>/</td></tr><tr><td>Scene Generation</td><td>/</td></tr><tr><td>Style Generation</td><td>294</td></tr><tr><td>Basic Visual Attributes Generation</td><td>321</td></tr><tr><td>Image Sharpness Generation</td><td>/</td></tr><tr><td>Exposure Generation</td><td>/</td></tr><tr><td>Lighting Generation</td><td>/</td></tr><tr><td>Contrast Generation</td><td>/</td></tr><tr><td>Color Saturation Generation</td><td>/</td></tr><tr><td>Noise Level Generation</td><td>/</td></tr><tr><td>Composition 
Generation</td><td>/</td></tr><tr><td>Color Balance Generation</td><td>/</td></tr><tr><td>Depth of Field Generation</td><td>/</td></tr><tr><td>Perspective Generation</td><td>/</td></tr><tr><td>Camera Angle Generation</td><td>/</td></tr><tr><td>Other Basic Visual Attributes Generation</td><td>/</td></tr><tr><td rowspan="4">Interaction Ability</td><td>Interacting Multi-Entity Generation</td><td>1729</td></tr><tr><td>Sequential Relationship Multi-Entity Generation</td><td>/</td></tr><tr><td>Causal Relationship Multi-Entity Generation</td><td>/</td></tr><tr><td>Spatial Relationship Multi-Entity Generation</td><td>/</td></tr></table>
368
+
369
+ # 8. Long Prompt Split
370
+
371
+ We use Qwen-VL-72B-Instruct [6] to help summarize the long prompt and split the long prompt into short sentences. Specifically, the prompt is designed as follows:
372
+
373
+ # Summarize Prompt
374
+
375
+ User: Please shorten the prompt to between 15 and 25 words, retaining the main information and ignoring details, specifically the characters, attributes, actions, and scenes. The prompt is as follows [Prompt].
376
+
377
+ # Split Prompt
378
+
379
+ User: Split the prompt into three or fewer shorter prompts, with each short prompt describing one aspect of the original long prompt's subject and should be fewer than 15 words. The prompt is as follows [Prompt].
380
+
381
+ ![](images/d7d66db3086833835da490af7e7b1e71abc4459a5dfeafbe4e8914bdedee1d3f.jpg)
382
+ (a) Overall performance on Visual Quality.
383
+
384
+ ![](images/6df52c8236c17b1f804f5dd144bf56b86177d30f693fb34848a93f1759062233.jpg)
385
+ (b) Overall performance on Alignment.
386
+
387
+ ![](images/561ea3a0f1b69db6e78c6b9ebba2c67a759aca88838c90a8611c638331639944.jpg)
388
+ Figure 7. Radar charts of the overall performance on the Visual Quality and Alignment aspects on Q-Eval-100K, where IR, IP, MR, MP indicate Instance-level SRCC, Instance-level PLCC, Model-level SRCC, Model-level PLCC, and -i, -v represents image and video respectively.
389
+
390
+ ![](images/a6d8319a4ca49f89828075637d8df6c17bcfe137fb7bc59cb28d5cf14c40a02f.jpg)
391
+
392
+ ![](images/036d6bcc5efd3618ac88c19c8e80d8a556b1e0f6808527bb3a3a0797cb635191.jpg)
393
+
394
+ ![](images/aee364ce2805e7334b0a779c93850e0349aa8787977c983cddd4dadf7a4ed729.jpg)
395
+ Figure 8. Visualization comparison results.
396
+
397
+ ![](images/a0f2c6ee5209bea8b0601d75eac4275febe129efe3ef518dd7f48390d5ccbc9d.jpg)
398
+
399
+ ![](images/0b894e7fb48a82fb7ea1f4510616eb577774da4bda8d7b9533032baf530465a9.jpg)
400
+
401
+ ![](images/8c5ec619bf2f4fa5d88f825dfb3a3e3e5d2f295807c0466dc451c2315b7844e8.jpg)
402
+ Figure 9. Variance probability distributions for images/videos of Q-Eval-100K respectively.
403
+
404
+ ![](images/c1f0a74717d5b2eac1f9695fb37a62c6718164a8ab89f0fb8f5e203c8a905c22.jpg)
405
+
406
+ # 8.1. Subjective experiment details:
407
+
408
+ 1) Each instance in the training and test sets is rated by at least 3 and 12 individuals on average. 2) We ensure raters' diversity by employing raters from a wide age range (18-52) and selecting raters from various professional backgrounds. Each rater annotates a maximum of 30 instances at a time, followed by a mandatory 30-minute break. 3) Perfect-score cases are rated by 12 individuals first, then reviewed and adjusted by a group of 5 experts. 4) Given the scale of Q-Eval-100K (the largest AIGC QA dataset with MOS at the cost of about 150,000 US dollars in total), involving 15 raters per instance as suggested by ITU [1] would be impractical due to time and cost constraints. To preserve the dataset's scale (crucial for LMM training under scaling laws), we reduce the number of raters and implement a 'Sample and Scrutinize' approach to maintain annotation quality. 5) The variance distribution of instance annotations is shown in Fig. 9, where most instances have a variance below 0.3.
409
+
410
+ # 9. Performance Details
411
+
412
+ # 9.1. Radar Charts of Overall Performance
413
+
414
+ To provide a comprehensive overview of the performance, we present the radar charts in Fig. 7. The key observations are as follows:
415
+
416
+ - Visual Quality. The proposed Q-Eval-Score outperforms all competitors, achieving the highest overall performance. The slight decline in video performance can be attributed to the 1fps frame sampling method, which reduces temporal information and affects accuracy. Despite this limitation, Q-Eval-Score leads the second-best competitor (Q-Align) by a notable margin of $10\%$ in video instance-level SRCC.
417
+ - Alignment. Q-Eval-Score also excels in alignment evaluation, surpassing competitors by $6\%$ in image instance-level SRCC and $12\%$ in video instance-level SRCC. Furthermore, the significant performance gains seen in other trained models indicate that Q-Eval-100K serves as a valuable dataset for improving alignment evaluation methods.
418
+ - Comparison Between Tasks. The performance of Q-Eval-Score in visual quality evaluation is relatively lower than in alignment tasks, highlighting the greater complex
419
+
420
+ ity of predicting visual quality. Alignment evaluation is more straightforward and objective, while visual quality involves nuanced and subjective perception, making it more challenging to assess.
421
+
422
+ Overall, the proposed Q-Eval-Score demonstrates remarkable capabilities in both visual quality and alignment evaluation. With performance exceeding 0.94 in image model-level metrics, it aligns closely with human judgment. These results underscore the robustness of Q-Eval-Score and its potential as a highly effective evaluation metric.
423
+
424
+ # 9.2. More Cross-validation Results
425
+
426
+ We further select 4 datasets for cross-validation (See Table 8). AGIQA $^{\text{Quality}}$ [35] & T2VQA [29] are for visual quality, while AGIQA $^{\text{Align}}$ [35] & TIFA160 [23] are for text alignment. The results show good generalization ability of Q-Eval-Score. (best in bold)
427
+
428
+ Table 8. Cross-validation (All pre-trained on Q-Eval-100K).
429
+
430
+ <table><tr><td>Dataset</td><td>AGIQAquality</td><td>T2VQA</td><td>AGIQAAlign</td><td>TIFA160</td></tr><tr><td>Method</td><td>SRCC/PLCC</td><td>SRCC/PLCC</td><td>SRCC/PLCC</td><td>SRCC/PLCC</td></tr><tr><td>Q-Align</td><td>0.6581/0.6743</td><td>0.2539/0.2198</td><td>Inapplicable</td><td>Inapplicable</td></tr><tr><td>CLIPScore</td><td>Inapplicable</td><td>Inapplicable</td><td>0.5515/0.5627</td><td>0.5903/0.5952</td></tr><tr><td>BLIP2Score</td><td>Inapplicable</td><td>Inapplicable</td><td>0.6873/0.7085</td><td>0.7267/0.7468</td></tr><tr><td>Q-Eval-Score</td><td>0.7256/0.7248</td><td>0.4479/0.4498</td><td>0.7544/0.7432</td><td>0.7845/0.7954</td></tr></table>
431
+
432
+ # 9.3. Visualization Results
433
+
434
+ We provide additional comparison examples in Fig. 8 to offer a clearer understanding of the evaluation capabilities of different models. It is evident from these examples that both CLIPScore and BLIPScore struggle significantly in tasks such as recognizing text within images and accurately counting objects. These models often fail by assigning disproportionately high scores to results that do not align with the intended outputs, reflecting their limitations in fine-grained assessment. Furthermore, when dealing with complex scenarios involving long and detailed prompts, these models exhibit a consistent tendency to assign significantly lower alignment scores, likely due to their inability to effectively parse and match intricate contextual information. In contrast, Q-Eval-Score consistently demonstrates a much higher degree of accuracy and reliability in these challenging scenarios. These results further highlight the potential of Q-Eval-Score as a unified framework for evaluating text-to-vision generative models across diverse and demanding conditions.
435
+
436
+ # 10. Data Statement
437
+
438
+ Considering the large scale of the dataset and the complexity of the model, we are actively organizing and refining the content to ensure its quality and usability. We solemnly pledge to release the Q-Eval-100K dataset in carefully
439
+
440
+ planned batches, ensuring a comprehensive and systematic open-sourcing process that effectively supports community development. Furthermore, we confirm that the dataset has successfully passed ethical review, affirming our commitment to responsible AI practices. Alongside the dataset, we will also release Q-Eval-Score and provide continuous updates, ensuring the model remains aligned with the rapid advancements in generative AI.
441
+
442
+ # 11. Broader Impact and Limitations
443
+
444
+ # 11.1. Broader Impact
445
+
446
+ - Empowering Generative AI Applications. The development of comprehensive evaluation methods, such as Q-Eval-100K and Q-Eval-Score, directly supports these advancements by ensuring the quality and alignment of generated content, enabling its effective deployment.
447
+ - Driving Standardization in Evaluation. By introducing a unified framework for assessing visual quality and alignment, this work provides a benchmark for systematic evaluation. This standardization not only enhances the reliability of evaluations across diverse use cases but also fosters transparency in generative AI systems.
448
+ - Facilitating Improvements in Generative Models. The dataset and framework encourage the refinement of generative models by providing detailed feedback on visual quality and alignment. These insights guide iterative improvements, pushing the boundaries of what generative AI can achieve.
449
+
450
+ # 11.2. Limitations
451
+
452
+ - Subjectivity in Visual Quality Evaluation. While Q-Eval-Score aligns closely with human evaluations, the inherently subjective nature of visual quality perception may result in variability. Differences in individual preferences and cultural factors could affect the generalizability of the evaluation framework.
453
+ - Dependency on Human Annotations. The reliance on extensive human annotations for dataset creation introduces scalability issues and potential biases. Automating parts of this process without sacrificing quality remains an open challenge.
data/2025/2503_02xxx/2503.02357/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3d586000709c29a8dbee73246c587e01b8e2f36d7806180b0c8bb4e1a31544c
3
+ size 1273069
data/2025/2503_02xxx/2503.02357/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02358/afddd584-7d0d-4efd-a372-e9f42b569ad5_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:454aa333881c8e2a0281eed0d6e843a70d4a18b3cda8e2418ca195cf100851ad
3
+ size 2629730
data/2025/2503_02xxx/2503.02358/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02358/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99b160d5f44051922ca50e584183209e07c5ef513b5928be5beed9eff75548e4
3
+ size 1017482
data/2025/2503_02xxx/2503.02358/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02390/635e71fe-44a1-404f-ae63-3692c9e475c1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9865cfbd1f4b599da9b789d131f8640d4543593cbd1432fb1eef3f2bf35e2ef9
3
+ size 1525045
data/2025/2503_02xxx/2503.02390/full.md ADDED
@@ -0,0 +1,576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ReSo: A Reward-driven Self-organizing LLM-based Multi-Agent System for Reasoning Tasks
2
+
3
+ Heng Zhou $^{1*}$ , Hejia Geng $^{2*}$ , Xiangyuan Xue $^{1}$ , Li Kang $^{1}$ , Yiran Qin $^{1}$ , Zhiyong Wang $^{3}$ , Zhenfei Yin $^{3,4\dagger}$ , Lei Bai $^{1\dagger}$ ,
4
+
5
+ $^{1}$ Shanghai Artificial Intelligence Laboratory, $^{2}$ Independent Researcher, $^{3}$ The University of Sydney, $^{4}$ Oxford University,
6
+
7
+ # Abstract
8
+
9
+ Multi-agent systems have emerged as a promising approach for enhancing the reasoning capabilities of large language models in complex problem-solving. However, current MAS frameworks are limited by poor flexibility and scalability, with underdeveloped optimization strategies. To address these challenges, we propose ReSo, which integrates task graph generation with a reward-driven two-stage agent selection process. The core of ReSo is the proposed Collaborative Reward Model, which can provide fine-grained reward signals for MAS cooperation for optimization. We also introduce an automated data synthesis framework for generating MAS benchmarks, without human annotations. Experimentally, ReSo matches or outperforms existing methods. ReSo achieves $33.7\%$ and $32.3\%$ accuracy on Math-MAS and SciBench-MAS, while other methods completely fail. The code and data are available at Reso.
10
+
11
+ # 1 Introduction
12
+
13
+ Increasing inference time has emerged as a critical method to enhance the reasoning capabilities of large language models (LLMs)(Snell et al., 2024). Two primary approaches have been explored: (1) optimizing a large reasoning model (Xu et al., 2025) by reinforcement learning and reward models during post-training, which could generate intermediate reasoning steps before answering (OpenAI et al., 2024b; DeepSeek-AI et al., 2025) and (2) leveraging multi-agent system (MAS) collaboration to complete complex tasks that are difficult to solve by single inference (Han et al., 2024; Guo et al., 2024; Wang et al., 2024b; Tran et al., 2025). Compared to the success of inference time scaling on the single LLM, MAS faces multiple challenges. (1) Most are handcrafted, with limited scalability
14
+
15
+ ![](images/41ca0208ae845dfcc7d29091b7a96b65e246c011eca2516ebdfbc999dddb401b.jpg)
16
+ Figure 1: Overview of ReSo pipeline. ReSo first decomposes the task into a DAG; and then constructs an agent graph by topological sorting. First, it searches for agent candidates for each subtask node from the dynamic agent database (DADB). Then it leverages the Collaborative Reward Model (CRM) to choose the best agent and update the agent estimation in DADB.
17
+
18
+ and adaptability. The lack of an effective agent self-organization mechanism hinders large-scale cooperation. (2) Most assume all agent abilities are fully known while assigning tasks, which is unrealistic for LLM-based agents. (3) Reward signals are restricted to missing, self-evaluation or outcome only, resulting in poorly defined optimization objectives. (4) Existing MASs lack mechanisms for dynamically optimizing agent networks, making it difficult to achieve data-driven improvements. To address these limitations, we ask: Can we design a self-organizing MAS to learn directly from data via reward signals without handcrafting?
19
+
20
+ To realize this potential, we propose ReSo, a reward-driven self-organizing MAS that integrates task graph generation and agent graph construction. The key innovation of our approach is the incorporation of fine-grained reward signals by the Collaborative Reward Model (CRM), which leads to dynamic optimization of agent collaboration. Different from existing MASs, our approach is both scalable and customizable, achieving state-of-the-art performance on complex reasoning tasks.
21
+
22
+ While ReSo builds on prior work in agent selection and task decomposition, its principal contribu
23
+
24
+ tion is the integrated formulation of these mechanisms within a self-organizing multi-agent reasoning framework. Our core insight is that individual agents exhibit heterogeneous expertise across different tasks and domains. During training, the CRM module evaluates each agent's performance and records these scores in the DADB in 3.3.1. At inference time, ReSo decomposes a complex problem into subtasks and consults the DADB to dynamically assign each subtask to the agent best suited for it. This emergent, self-organizing process sets ReSo apart from traditional, linear pipeline architectures. While extensive datasets exist for evaluating the reasoning capabilities of LLMs (Chang et al., 2023; Guo et al., 2023), high-quality MAS evaluation benchmarks are scarce. Therefore, we propose an automatic data synthesis method to generate various MAS tasks by converting existing LLM benchmarks into complex collaboration problems. This method provides step-by-step reward signals without additional human annotations, enabling efficient and scalable MAS evaluation. Our contributions can be summarized as:
25
+
26
+ - We first propose a Collaborative Reward Model, which can provide fine-grained reward signals for multi-agent collaboration.
27
+ - We present an automatic data synthesis method to generate arbitrarily complex MAS tasks from existing LLM benchmarks.
28
+ - We propose ReSo, the first scalable and customizable self-organizing MAS framework. Experimental results demonstrate the superior performance of ReSo on challenging tasks.
29
+
30
+ # 2 Related Work
31
+
32
+ # 2.1 Reward Guidance
33
+
34
+ The reward model has become a critical component in enhancing the capabilities of LLMs through post-training (Wang et al., 2024d). By providing feedback on the quality of LLM outputs, RMs facilitate performance improvement, enabling models to generate more accurate and detailed responses. The concept of reward-guided learning was first introduced in InstructGPT (Ouyang et al., 2022), which uses human feedback to fine-tune LLMs, aligning their behavior with user intent. In addition to outcome-based supervision, process-based supervision has been shown to improve the reasoning process itself (Uesato et al., 2022), enhancing not just the final answer but also the steps leading to it.
35
+
36
+ Building on this, (Lightman et al., 2023) intro
37
+
38
+ duced a process reward model (PRM) fine-tuned on PRM800K, which provides fine-grained and interpretable rewards for every reasoning step. Similarly, (Wang et al., 2024c) developed MathShepherd, an approach capable of autonomously generating process supervision data. Despite the advantages of neural-based reward models in terms of generalization, they also suffer from reward hacking (Gao et al., 2022; Skalse et al., 2022). To mitigate this, some recent approaches have employed rule-based rewards (DeepSeek-AI et al., 2025) or fixed inference budgets (Muennighoff et al., 2025), which have also proven effective. Notably, DeepSeek-R1 (DeepSeek-AI et al., 2025) incorporates both output accuracy and reasoning format evaluation, achieving the performance on par with OpenAI-O1 (OpenAI et al., 2024b; Qin et al., 2024). DeepSeek-R1 demonstrates that only using large-scale reinforcement learning based on rule-based reward during post-training can stimulate LLM's excellent reasoning ability, without supervised fine-tuning.
39
+
40
+ # 2.2 Multi-Agent System
41
+
42
+ Recent advances in LLM-based MAS have raised expectations for their ability to tackle increasingly complex reasoning tasks (Han et al., 2024; Guo et al., 2024; Wang et al., 2024b; Tran et al., 2025).
43
+
44
+ Predefined cooperation in MAS relies on structured interactions and role assignments before collaboration. Early works focus on MAS infrastructure, including Camel, AutoGen, and AgentVerse (Li et al., 2023; Wu et al., 2023; Chen et al., 2023). Some approaches adopt standard operating procedures for structured task decomposition, as seen in MetaGPT and ChatDev (Hong et al., 2024; Qian et al., 2024a; Dong et al., 2024). Fixed topologies are most adopted, such as hierarchical structures in MOA (Wang et al., 2024a) and directed acyclic graphs in MacNet and MAGDI (Qian et al., 2024b; Chen et al., 2024c). Predefined role interactions are also widely used such as debate (Du et al., 2023), criticism (Chen et al., 2024b), and certain math reasoning patterns (Gou et al., 2024; Lei et al., 2024; Xi et al., 2024). Predefined MASs exhibit several limitations including: (1) Scalability and adaptability being constrained by the imposition of rigid role assignments and fixed topological structures. (2) The unrealistic assumption that the agent's abilities are fully known when assigning tasks, which is particularly problematic for LLM-based agents.
45
+
46
+ Optimizable cooperation in MAS aims to dynam
47
+
48
+ ically adapt interaction topology and agent roles. GPTSwarm (Zhuge et al., 2024) formulates MAS as optimizable computational graphs, refining node prompts and inter-agent connectivity via evolutionary algorithms. DyLAN (Liu et al., 2024b) employs a layerwise feedforward agent network and a mutual rating mechanism to dynamically optimize MAS. G-Designer (Zhang et al., 2025a) utilizes variational graph auto-encoders to optimize MAS. Current optimizing approaches are highly underexplored. They often lack reliable, fine-grained reward signals for MAS collaboration, relying instead on outputs or self-generated reward mechanisms. Meanwhile, dynamic network optimization algorithms for MAS are also lacking.
49
+
50
+ # 3 Methods
51
+
52
+ To tackle the existing challenges in MAS research, we propose two core innovations: (1) ReSo, a reward-driven self-organizing MAS, which is capable of autonomously adapting to complex tasks and a flexible number of agent candidates, eliminating the need for handcrafted solutions. (2) Introduction of a Collaborative Reward Model (CRM), specifically tailored to optimize MAS performance. CRM can deliver fine-grained reward signals on multiagent collaboration, enabling data-driven MAS performance optimization.
53
+
54
+ # 3.1 Problem Formulation
55
+
56
+ We define a MAS algorithm $f_{MAS}$ as a function that, given a natural language question $Q$ , generates a graph-structured task decomposition, solves each subtask, and produces a final answer:
57
+
58
+ $$
59
+ f _ {M A S} (Q) \rightarrow \left(G = (V, E), A _ {V}, A _ {Q}\right) \tag {1}
60
+ $$
61
+
62
+ Here, $G = (V,E)$ represents the task decomposition graph, which is structured as a directed acyclic graph (DAG). The set of nodes $V = \{v_{1},v_{2},\ldots ,v_{n}\}$ corresponds to the subtasks derived from $Q$ , while the edges $E\subseteq V\times V$ define the dependencies between these subtasks. The system produces subtask answers $A_V = \{a_{v_1},a_{v_2},\dots ,a_{v_n}\}$ and ultimately derives the final answer $A_{Q}$ . To achieve this, we decompose $f_{MAS}$ into two sub-algorithms:
63
+
64
+ $$
65
+ f _ {M A S} (Q) = f _ {a g e n t} \circ f _ {t a s k} (Q) \qquad (2)
66
+ $$
67
+
68
+ $f_{task}$ is responsible for constructing the task decomposition graph from the input question, ensuring a structured breakdown of the problem into
69
+
70
+ subtasks and dependencies. $f_{agent}$ dynamically selects and assigns appropriate agents to solve the identified subtasks. This modular design enables independent optimization of each component, allowing for greater flexibility and scalability.
71
+
72
+ For the MAS-generated answer $A_{Q}$ to be considered correct, the following conditions must be satisfied: (1) All subtask answers must be correct. (2) All directed edges must correctly enforce the dependency relationships among subtasks. (3) The final output $A_{Q}$ must be correct.
73
+
74
+ # 3.2 Task Graph Construction
75
+
76
+ In the proposed method, $f_{task}$ first transforms the question $Q$ into a directed acyclic task graph $G$ :
77
+
78
+ $$
79
+ f _ {t a s k}: Q \rightarrow G = (V, E) \tag {3}
80
+ $$
81
+
82
+ where $G$ represents the decomposition of the original task $Q$ . Each node $v_{i} \in V$ is a natural language subtask, and each directed edge $(v_{i} \rightarrow v_{j}) \in E$ indicates that the subtask $v_{j}$ depends on the successful completion of $v_{i}$ .
83
+
84
+ In practice, we perform supervised fine-tuning (SFT) on an LLM to perform this step of task decomposition. Using our synthetic data, we explicitly require the LLM to decompose $Q$ into logical sub-problems, specify their execution order and dependencies, and output in a format of DAG.
85
+
86
+ # 3.3 Two-Stage Agent Search
87
+
88
+ Once the task graph is obtained, we need to assign each subtask to the most appropriate agent. We denote this agent assignment procedure as $f_{agent}$ . Conceptually, $f_{agent}$ classifies each node in the task graph according to the most suitable agent from a large agent pool $A$ , constructing an agent graph that maps each node to one or more selected agents.
89
+
90
+ $$
91
+ f _ {\text {a g e n t}}: v _ {i} \in V \rightarrow a _ {i} \in \mathcal {A} \tag {4}
92
+ $$
93
+
94
+ Since $\mathcal{A}$ can contain a large number of agents, we first introduce the concept of Dynamic Agent Database. Then we decompose the agent graph construction on every subtask into two search algorithms from coarse to fine-grained: first, select a subset of candidates from DADB then utilize the reward model to evaluate and select the best agent.
95
+
96
+ # 3.3.1 Dynamic Agent Database
97
+
98
+ To increase MAS's scalability and flexibility, we propose the Dynamic Agent Database (DADB), denoted as $\mathcal{A}$ , which enables adaptive agent selection by maintaining both static and dynamic agents
99
+
100
+ ![](images/3a7a8226b0a289b9337c04901014e73d43c922dbd8fa3aab4387e87a64145cac.jpg)
101
+ (a) Task Graph Construction
102
+
103
+ ![](images/1064f251e740fdda77812e651261e7954a7144298343430382d0229e95b23070.jpg)
104
+ (b) Training Stage of ReSo
105
+
106
+ ![](images/559b781579de43cc12eeac63a7401857eb1884d16145645ff1f8266fbdc21ed5.jpg)
107
+ (c) Testing Stage of ReSo
108
+ Figure 2: Illustration of our proposed ReSo. (a) We decompose the question into a subtask DAG. (b) The training of ReSo: we first use the UCB score to perform a coarse search in DADB and select top-k agents, then score the inference results using CRM, and update DADB by rewards. Repeat the above process for each node in DAG by topological order. (c) The testing of ReSo: we select the best agent from DADB.
109
+
110
+ profiles. For each agent $a_i \in \mathcal{A}$ , its static profile includes the base model, role settings, initial prompt, long-term memory, and tools. The dynamic profile, continuously updated via the reward model, tracks the agent's average reward $R(a_i)$ , computational cost $C(a_i)$ , and task count $n(a_i)$ . Initially, agents have only static attributes, while training iteratively refines their evaluations by the process reward model, optimizing future selection.
111
+
112
+ Given an input task $v_{j}$ , the DADB assigns a preliminary quality score $Q(a_{i},v_{j})$ to each agent $a_{i}$ , balancing task-agent similarity, historical performance, and computational costs:
113
+
114
+ $$
115
+ Q \left(a _ {i}, v _ {j}\right) = \operatorname {s i m} \left(a _ {i}, v _ {j}\right) \cdot \operatorname {p e r f o r m} \left(a _ {i}\right) \tag {5}
116
+ $$
117
+
118
+ where $\mathrm{sim}(a_i,v_j)$ represents the similarity between the subtask's target profile and the agent's static profile. In practice, we employ a Heaviside function which ensures that only agents exceeding a predefined similarity threshold $V_{th}$ are considered: $\mathrm{sim}(a_i,v_j) = H[\langle \mathbf{q_i},\mathbf{a_i}\rangle -V_{th}]$ where $\mathbf{q_i},\mathbf{a_i}$ are text embedding of subquestion and the agent static profile. The perform $(a_{i})$ term is given by perform $(a_{i}) = R(a_{i}) - \beta C(a_{i})$ , where $\beta$ con
119
+
120
+ trols the trade-off between the agent's historical performance and cost.
121
+
122
+ # 3.3.2 Coarse Agent Search by UCB
123
+
124
+ Given a DADB $\mathcal{A}$ and a subtask $v_{j}$ , our first objective is to retrieve a promising subset of $k$ candidate agents. To take advantage of the known information in DADB, also to explore unused agents, we adopt an Upper Confidence Bound value:
125
+
126
+ $$
127
+ \mathbf {U C B} \left(a _ {i}, q _ {j}\right) = Q \left(a _ {i}, q _ {j}\right) + c \sqrt {\frac {N}{n \left(a _ {i}\right) + \varepsilon}} \tag {6}
128
+ $$
129
+
130
+ where $N$ is the total number of agent selections and $n(a_{i})$ the number of times agent $i$ is selected, $\varepsilon \ll 1$ . $c$ is a constant controlling the exploration-exploitation trade-off. Agents with higher UCB scores are more likely to be selected, helping the MAS to explore potentially underutilized agents. For each subtask $q_{j}$ , we sort agents by their $\mathrm{UCB}(a_i,q_j)$ and choose the top $k$ agents as the candidate set $A_{\mathrm{cand}} = \{a_1,a_2,\ldots ,a_k\}$ .
131
+
132
+ # 3.3.3 Fine-grained Agent Evaluation by CRM
133
+
134
+ Once the candidate agents $\mathcal{A}_{\mathrm{cand}}$ are selected, we evaluate their performance on the current subtask
135
+
136
+ $v_{j}$ using a Collaborative Reward Model (CRM). This evaluation process is straightforward: each candidate agent $a_{i}$ generates an answer to the subtask $v_{j}$ : $a_{i}(v_{j})$ , and then we assess the quality of that answer based on a reward signal:
137
+
138
+ $$
139
+ r \left(a _ {i}, v _ {j}\right) = \operatorname {R e w a r d M o d e l} \left(a _ {i}, v _ {j}, a _ {i} \left(v _ {j}\right)\right) \tag {7}
140
+ $$
141
+
142
+ where RewardModel evaluates the quality of the solution based on the given agent's profile, subtask, and previous reasoning process. After evaluating the agents, we assign the agent with the highest reward, $a_{j}^{*}$ , to the subtask node $v_{j}$ , which means $a_{j}^{*}$ 's solution is used as $v_{j}$ 's answer. This process is repeated for each subtask on the graph.
143
+
144
+ The reward $r(a_{i}, v_{j})$ is computed using the CRM, which can be either rule-based (e.g., binary correctness: 0 for incorrect, 1 for correct) or neural-based (providing a score between 0 and 1 for quality). The reward model evaluates how well the agent's response aligns with the expected outcome, factoring in both the solution's correctness and its collaboration within the MAS.
145
+
146
+ # 3.4 Training and Inference Stage
147
+
148
+ Our multi-agent system can operate in two modes: training and testing. During training, we leverage a high-quality reward $r(a_{i}, v_{j})$ available for evaluating the correctness of every step of MAS. Upon receiving $r(a_{i}, v_{j})$ for each candidate agent, we update that agent's dynamic profile in DADB. For instance, we may maintain a running average of rewards:
149
+
150
+ $$
151
+ R \left(a _ {i}\right) \leftarrow \frac {n \left(a _ {i}\right) \cdot R \left(a _ {i}\right) + r \left(a _ {i} , v _ {j}\right)}{n \left(a _ {i}\right) + 1} \tag {8}
152
+ $$
153
+
154
+ similarly for updating the cost $C(a_i, v_j)$ . By iteratively learning from data, the DADB can dynamically update agent evaluations based on historical reward, facilitating adaptive agent selection and improving both efficiency and performance. During testing, the reward model is no longer required. Instead, we leverage the learned DADB to select the best agent candidates and the best answer to each subtask.
155
+
156
+ # 3.5 The Perspective of MCTS
157
+
158
+ The task graph, after topological sorting, forms a decision tree where each node represents a subtask and the edges denote dependencies. At each level, we use UCB to prune the tree and select a subset of promising agents, then simulate each agent and
159
+
160
+ evaluate their performance using the CRM. The resulting reward updates the agent's dynamic profile, refining the selection strategy. The MAS construction is essentially finding the optimal path from the root to the leaves, maximizing the UCB reward for the best performance.
161
+
162
+ Consider there are $N$ agents and a task requiring $D$ agents to collaborate. Assume that the average inference cost is $c$ and the matching cost in DADB is $s \ll c$ per agent. A brute-force search has a complexity of $O(c \cdot N^D)$ , which becomes infeasible as $N$ and $D$ grow. In contrast, our self-organizing strategy, selecting top $k$ per step, reduces the cost to $O((s \cdot N + N \log N + k \cdot c) \cdot D)$ , offering a near-linear scaling with $N$ and $D$ , making the approach highly scalable for large $N$ and $D$ .
163
+
164
+ # 4 Data Synthesis
165
+
166
+ A key challenge in MAS is the lack of structured datasets for evaluating and training agent collaboration. To address this, we propose an automated framework that converts existing LLM datasets into structured, multi-step MAS tasks, enabling fine-grained evaluation without human annotations.
167
+
168
+ Random DAG Generation We begin by generating a DAG, $G = (V, E)$ . Each node $v_i \in V$ will be filled with a subtask $(q_i, a_i)$ , where $q_i$ is the textual description of the task, and $a_i$ is its numerical answer. The subtasks are sampled from the existing LLM benchmarks. The edges $E$ will encode dependency constraints between subtasks, ensuring that the solution to one subtask is required as an input for another, modeling the sequential reasoning process of multi-agent collaboration.
169
+
170
+ Subtask Selection and Filling To populate the nodes of $G$ , we construct a master pool of candidate subtasks, denoted as $\mathcal{P}$ . Each candidate subtask $p_i \in \mathcal{P}$ consists of a textual problem description $s_i$ , and a numerical answer $a_i$ . After obtaining $\mathcal{P}$ , we randomly sample from it and fill one question per node into the generated DAG. Candidate subtasks should have clear numerical or option answers, such as SciBench (Wang et al., 2024f), Math (Hendrycks et al., 2021), GPQA (Rein et al., 2023), etc. To ensure that the problem is computationally feasible for later dependency construction, we extract a numerical constant $c_i \in \mathbb{R}$ from the problem text. If the extracted constant is valid, the subtask is retained in $\mathcal{P}$ ; otherwise, it is discarded. This ensures that only problems with well-defined nu
171
+
172
+ merical attributes are incorporated.
173
+
174
+ Dependency Edge Construction After all nodes are populated, we generate natural language dependency descriptions for edges. Each edge $(v_{j} \rightarrow v_{k})$ should represent a relationship which connects previous subtask $v_{j}$ 's answer $a_{j}$ , with subsequent subtask $v_{k}$ 's question parameter $c_{k}$ . For each edge, we generate a textual description $e_{jk}$ , such as "in this question, $c_{k} =$ previous answer + 3." Formally, it is an algorithm that constructs a string from two numbers: $e_{ij} = f(a_{j}, c_{k})$ . $f$ can be implemented using elementary arithmetic and text templates, ensuring that no answers or parameters in the original subtask need to be manually modified. Once the DAG is fully constructed, we refine node descriptions by removing any explicitly given numerical constants $\{c_{i}\}$ that are now dependent on the results of prior nodes. Finally, an entire graph described in natural language is a piece of synthetic data.
175
+
176
+ The proposed data synthesis framework generates structured, multi-step reasoning tasks with adjustable sizes, ensuring diverse and scalable problem structures. The synthesized dataset supports both training and testing, enabling fine-grained evaluation without human annotations.
177
+
178
+ # 5 Experiments
179
+
180
+ In 5.1, we first use public datasets to create complex MAS benchmarks and fine-tune ReSo's task decomposition and collaborative reward models. All code, datasets, and models are publicly available. In 5.2, we train and evaluate ReSo on both public and synthetic datasets. 5.3 presents ablation studies on task decomposition, agent selection, and reward guidance mechanisms.
181
+
182
+ # 5.1 Data Synthesis and Model Fine-tuning
183
+
184
+ # 5.1.1 Data Synthesis
185
+
186
+ MATH (Hendrycks et al., 2021) consists of problems from diverse mathematical domains, while SciBench (Wang et al., 2024f) includes scientific reasoning tasks spanning physics, chemistry, and mathematics. Using these datasets, we apply the synthetic data generation method outlined in 4 to create two datasets: one for single LLM fine-tuning and another for benchmarking. Difficulty is categorized by the number of subtasks—Easy (3), Medium (5), and Hard (7).
187
+
188
+ Fine-tuning data For fine-tuning the task decomposition LLM, we generate 14,500 questions and answers from the MATH training set, with numbers of subtasks ranging from 2 to 6. For fine-tuning the neural-based CRM, we generate 5,000 questions from the same set, with 5 subtasks per question.
189

+
190

+
191
+
192
+ # 5.1.2 Model Fine-tuning
193
+
194
+ Task Decomposition Model Training To ensure high-quality task composition, we fine-tune a specialized model for task decomposition based on Qwen2.5-7B-Instruct. We use 14500 dialogues on task decomposition as described in 5.1.1, and fine-tune the model under a batch size of 128 and a learning rate of 1e-4 for 3 epochs. The fine-tuned model can reliably produce task decomposition in a structured format.
195
+
196
+ CRM Training The proposed CRM is fine-tuned based on Qwen2.5-Math-PRM-7B (Zhang et al., 2025b), which can provide effective process reward signals on MAS collaborative reasoning tasks. We use 5000 samples of sub-tasks with their answers as described in 5.1.1. We follow a simplified training scheme of PRMs, where the model should only perform binary classification on the special token at the end of the answer. The model is trained with a batch size of 128 and a learning rate of 1e-4 for 5 epochs. The fine-tuned model can output the probability of the answer being correct, which is then taken as the collaborative reward signal.
197
+
198
+ MAS Benchmarks We select 201 questions from SciBench as the sub-question data pool and synthesized complex data using the method in 4. This forms the SciBench-MAS dataset, comprising 200 easy-level training questions and 247 testing questions (107 easy, 80 medium, 62 hard). For MATH (Hendrycks et al., 2021), 348 level-5 questions are selected, from which we generate the Math-MAS dataset, consisting of 269 test questions for ReSo (91 easy, 89 medium, 89 hard).
199
+
200
+ # 5.2 Main Results of ReSo
201
+
202
+ Models and MASs We compare ReSo with state-of-the-art LLM and MAS methods. Our single-LLM baselines include GPT-4o (OpenAI et al., 2024a), Gemini-2.0-Flash (Team et al., 2024), Claude-3.5-Sonnet (Anthropic, 2024), Qwen2.5-Max (Yang et al., 2024), DeepSeek-V3 (Liu et al., 2024a). For ReSo, we build an agent database that includes these base models, extended to 63 agents with different prompts. For MAS, we evaluate MetaGPT (Hong et al., 2024), DyLAN (Liu et al.,
203
+
204
+ <table><tr><td rowspan="2">Method</td><td colspan="4">Math-MAS</td><td colspan="4">SciBench-MAS</td></tr><tr><td>Easy</td><td>Medium</td><td>Hard</td><td>Tokens</td><td>Easy</td><td>Medium</td><td>Hard</td><td>Tokens</td></tr><tr><td>GPT-4o</td><td>27.5</td><td>9.0</td><td>0.0</td><td>2.2k</td><td>39.3</td><td>12.5</td><td>1.6</td><td>2.1k</td></tr><tr><td>Gemini-2.0-Flash</td><td>69.2</td><td>24.7</td><td>9.0</td><td>3.0k</td><td>64.5</td><td>33.8</td><td>9.7</td><td>2.5k</td></tr><tr><td>Claude-3.5-Sonnet</td><td>12.1</td><td>0.0</td><td>0.0</td><td>1.0k</td><td>22.4</td><td>6.2</td><td>3.2</td><td>1.4k</td></tr><tr><td>Qwen2.5-Max</td><td>44.0</td><td>13.5</td><td>4.5</td><td>2.9k</td><td>55.1</td><td>30.0</td><td>4.8</td><td>2.8k</td></tr><tr><td>DeepSeek-V3</td><td>52.7</td><td>24.7</td><td>12.4</td><td>2.2k</td><td>52.3</td><td>31.3</td><td>12.9</td><td>2.3k</td></tr><tr><td>MetaGPT</td><td>30.8</td><td>12.4</td><td>2.2</td><td>16.1k</td><td>48.6</td><td>2.5</td><td>0.0</td><td>14.6k</td></tr><tr><td>DyLAN</td><td>40.7</td><td>9.0</td><td>0.0</td><td>64.1k</td><td>48.6</td><td>2.5</td><td>0.0</td><td>77.8k</td></tr><tr><td>GPTSwarm</td><td>35.2</td><td>5.6</td><td>4.5</td><td>14.9k</td><td>31.8</td><td>6.3</td><td>1.6</td><td>18.2k</td></tr><tr><td>GDesigner</td><td>14.2</td><td>5.6</td><td>0.0</td><td>16.9k</td><td>24.3</td><td>12.5</td><td>0.0</td><td>19.0k</td></tr><tr><td>ReSo (ours)</td><td>79.1</td><td>56.2</td><td>33.7</td><td>14.6k</td><td>67.3</td><td>51.3</td><td>32.3</td><td>20.7k</td></tr></table>
205
+
206
+ Table 1: Accuracy and average token usage on Math-MAS and SciBench-MAS. Bold and underlined represent optimal and suboptimal results, respectively. Tokens denotes the average number of tokens consumed per task.
207
+
208
+ 2024b), GPTSwarm (Zhuge et al., 2024), GDesigner (Zhang et al., 2025a). All MAS baselines use GPT-4o as the backbone.
209
+
210
+ Comparisons with LLMs As shown in Table 1, most single-model agents exhibit a sharp decrease in accuracy as the difficulty increases. At the hard difficulty level, their accuracy approaches zero, suggesting that single LLMs struggle with compositional reasoning. In particular, we show the results of these single LLMs on single Math and SciBench datasets in Appendix B, with accuracy rates of $80\% -90\%$ . This means that a single LLM can successfully solve a single sub-problem in the dataset, but its generalization ability for combined complex problems is very limited.
211
+
212
+ Comparisons with MASs Notably, ReSo outperforms other approaches in both the Math-MAS and SciBench-MAS datasets. At the hard difficulty level, ReSo reaches an accuracy of $33.7\%$ on MathMAS and $32.3\%$ on SciBench-MAS, while other MAS methods almost completely fail.
213
+
214
+ Results on Standard Benchmarks Our approach demonstrates robust performance not only on complex task datasets but also on widely adopted benchmarks. Table 2 summarizes the comparative accuracy, where ReSo consistently achieves the highest scores across all tasks. These results attest to ReSo's strong generalization capabilities and its effectiveness in mathematical and scientific reasoning, as well as related domains.
215
+
216
+ # 5.3 Ablation Studies
217
+
218
+ We conduct ablation studies on our proposed multiagent system, examining three core designs: task
219
+
220
+ Table 2: Comparison of accuracy (%) on standard benchmarks.
221
+
222
+ <table><tr><td>Method</td><td>GSM8K</td><td>GPQA</td><td>HumanEval</td><td>MMLU</td></tr><tr><td>DyLAN</td><td>88.16</td><td>49.55</td><td>89.70</td><td>80.16</td></tr><tr><td>GDesigner</td><td>95.07</td><td>53.57</td><td>89.90</td><td>84.50</td></tr><tr><td>GPTSwarm</td><td>89.74</td><td>52.23</td><td>88.49</td><td>83.98</td></tr><tr><td>ReSo (ours)</td><td>95.70</td><td>55.80</td><td>92.00</td><td>88.70</td></tr></table>
223
+
224
+ decomposition, agent selection, and reward signal.
225
+
226
+ Task Decomposition We compare three different approaches to task decomposition: (1) Ground Truth, representing an upper bound with human-crafted, meticulously designed task breakdowns; (2) GPT-4o, which autonomously decomposes complex tasks into sub-tasks without targeted fine-tuning; and (3) Qwen2.5-7B-SFT, a model fine-tuned on our dataset based on Qwen2.5-7B, specifically adapted to generate more effective decompositions for complex questions. Figure 3(a) presents the reasoning accuracy under different decomposition strategies. The ground-truth decomposition consistently yields the highest accuracy, underscoring the critical role of precise subproblem segmentation. Meanwhile, the fine-tuned task generator surpasses the naive GPT-4o approach, demonstrating that even a small amount of domain-specific training data can significantly improve decomposition quality and enhance overall system performance.
227
+
228
+ Agent Selection We compare three strategies for agent selection: a random strategy, a greedy strategy that always selects the most matching profile, and our proposed ReSo approach. As shown in Figure 3(b), ReSo significantly outperforms other
229
+
230
+ ![](images/e457abf0d69c0fff51f4038b0e2f72f51e0d4e344c3bfc58fd97d96e55d41d4b.jpg)
231
+ (a) Task Decomposition
232
+
233
+ ![](images/a614edad75c1d4479ef20135d2096a167d6fc6d42b68dd2413554c17d33c0375.jpg)
234
+ (b) Agent Selection
235
+
236
+ ![](images/34410f1184d7fe592986e269a72c6f02d6414fe867de36af9c6851419321627a.jpg)
237
+ (c) Reward Signal
238
+ Figure 3: Results of ablation studies. (a) Fine-tuning on domain-specific training data can significantly improve the decomposition quality, thus enhancing overall system performance. (b) Our robust agent selection strategy within the MAS is significant to the performance. (c) Compared to general reward models, our fine-tuned reward model is more task-specific and brings more precise reward signals, thus improving the system performance.
239
+
240
+ strategies across all the datasets, which emphasizes the importance of a robust agent selection strategy within the multi-agent framework. By strategically assigning each sub-task to the most suitable agent, the system can handle increasingly complex tasks with markedly better accuracy.
241
+
242
+ Reward Signal Ablation We investigate the impact of different reward signals on system optimization, considering three approaches. Figure 3(c) presents the results of training our MAS under these reward schemes on the SciBench-MAS dataset. Details are provided in Appendix G.
243
+
244
+ # 5.4 Scalability Analysis
245
+
246
+ Agent Scalability ReSo's modular design allows the dynamic addition of new agents without retraining the entire system. Each agent registers its static profile in the Dynamic Agent Database (DADB) and begins contributing immediately. For example, during our HumanEval experiments, we simply added some code-specialist agents on top of the existing 63 agents. ReSo seamlessly leveraged its capabilities to improve overall performance.
247
+
248
+ Task and Domain Generality ReSo is task-agnostic and domain-agnostic: as long as domain-specific data is available, it can generate a task DAG, select appropriate agents, and optimize their collaboration. Our automated data synthesis pipeline converts an LLM benchmark into a multi-step MAS task without human annotations, enabling straightforward migration from mathematics and scientific reasoning to other fields.
249
+
250
+ Training Data Scalability The effectiveness of agent selection in ReSo grows with more training data. During training, DADB maintains and updates each agent's reward statistics and cost estimates. As the number of training samples increases, ReSo builds a more accurate model of each agent's strengths and weaknesses, resulting in progressively better agent assignments and higher overall accuracy. Figure 4 shows that ReSo's accuracy increases with the training process.
251

+
252

+
253
+
254
+ ![](images/5fefc63fd93a270cf59844b719101da800b9b4d0e1b21b0d257d444ecd4d7e79.jpg)
255
+ Figure 4: Training Curve of ReSo.
256
+
257
+ # 6 Conclusion
258
+
259
+ In this work, we introduce ReSo, a reward-driven self-organizing MAS for complex reasoning. By integrating a collaborative reward model, ReSo automates agent selection and collaboration, improving scalability and adaptability. The automated data synthesis framework eliminates manual annotations. Experiments show that ReSo outperforms existing MAS and single LLM baselines. All codes, models, and data have been open-sourced. We expect ReSo to enable co-optimization of MAS and LLM to further enhance reasoning capabilities.
260
+
261
+ # 7 Limitations
262
+
263
+ Although the base model for the agents is a fixed model, ReSo has demonstrated strong optimizability and scalability as well as good performance. A further interesting research question is: Can the optimization of MAS be performed together with the optimization of a single LLM agent? Specifically, can the reward signal given to the model by our CRM in each step of cooperation be combined with the reinforcement learning-based post-training of a single model to further optimize MAS at both the macro and micro levels? This means a dynamic agent cooperation network, where agents can not only learn how to interact with each other but also fine-tune their weights through feedback from cooperation. We look forward to follow-up research.
264
+
265
+ # 8 Ethical Considerations
266
+
267
+ While our proposed ReSo framework focuses on reasoning tasks in the domains of mathematics and science, it has the potential to be applied in other, possibly unethical, contexts. Such misuse could pose significant threats to human society. We strongly urge readers to carefully consider these ethical implications and to adopt a conscientious approach in the development and application of these methods.
268
+
269
+ # References
270
+
271
+ AI Anthropic. 2024. The claude 3 model family: Opus, sonnet, haiku. *Claude-3 Model Card*.
272
+ Cameron B Browne, Edward Powley, Daniel Whitehouse, Simon M Lucas, Peter I Cowling, Philipp Rohlfshagen, Stephen Tavener, Diego Perez, Spyridon Samothrakis, and Simon Colton. 2012. A survey of monte carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in games, 4(1):1-43.
273
+ Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, Wei Ye, Yue Zhang, Yi Chang, Philip S. Yu, Qiang Yang, and Xing Xie. 2023. A survey on evaluation of large language models. Preprint, arXiv:2307.03109.
274
+ Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. 2024a. Alphamath almost zero: Process supervision without process. Preprint, arXiv:2405.03553.
275
+ Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. 2024b. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning. Preprint, arXiv:2409.12147.
276
+
277
+ Justin Chih-Yao Chen, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. 2024c. Magdi: Structured distillation of multi-agent interaction graphs improves reasoning in smaller language models. Preprint, arXiv:2402.01620.
278
+ Weize Chen, Yusheng Su, Jingwei Zuo, Cheng Yang, Chenfei Yuan, Chi-Min Chan, Heyang Yu, Yaxi Lu, Yi-Hsin Hung, Chen Qian, Yujia Qin, Xin Cong, Ruobing Xie, Zhiyuan Liu, Maosong Sun, and Jie Zhou. 2023. Agentverse: Facilitating multiagent collaboration and exploring emergent behaviors. Preprint, arXiv:2308.10848.
279
+ DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang W. L. Xiao Wei An Xiaodong Liu Xiaohan Wang Xiaokang Chen Xiaotao Nie Xin Cheng Xin Liu Xin Xie Xingchao Liu Xinyu Yang Xinyuan Li Xuecheng Su Xuheng Lin X.Q.Li Xiangyue Jin Xiaojin Shen Xiaosha Chen Xiaowen Sun Xiaoxiang Wang Xinnan Song Xinyi Zhou Xianzu Wang Xinxia Shan Y.K. Li Y.Q.Wang Y.X. 
Wei Yang Zhang Yanhong Xu Yao Li Yao Zhao Yaofeng Sun Yaohui Wang Yi Yu Yichao Zhang Yifan Shi Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunfan Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y.X.Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren Zhang Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan Zhang Zhewen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zi jia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu
280
+
281
+ Zhang, and Zhen Zhang. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint, arXiv:2501.12948.
282
+ Yihong Dong, Xue Jiang, Zhi Jin, and Ge Li. 2024. Self-collaboration code generation via chatgpt. Preprint, arXiv:2304.07590.
283
+ Yilun Du, Shuang Li, Antonio Torralba, Joshua B. Tenenbaum, and Igor Mordatch. 2023. Improving factuality and reasoning in language models through multiagent debate. Preprint, arXiv:2305.14325.
284
+ Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. 2024. Alphazero-like tree-search can guide large language model decoding and training. Preprint, arXiv:2309.17179.
285
+ Leo Gao, John Schulman, and Jacob Hilton. 2022. Scaling laws for reward model overoptimization. Preprint, arXiv:2210.10760.
286
+ Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 2024. Tora: A tool-integrated reasoning agent for mathematical problem solving. Preprint, arXiv:2309.17452.
287
+ Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V. Chawla, Olaf Wiest, and Xiangliang Zhang. 2024. Large language model based multi-agents: A survey of progress and challenges. Preprint, arXiv:2402.01680.
288
+ Zishan Guo, Renren Jin, Chuang Liu, Yufei Huang, Dan Shi, Supryadi, Linhao Yu, Yan Liu, Jiaxuan Li, Bojian Xiong, and Deyi Xiong. 2023. Evaluating large language models: A comprehensive survey. Preprint, arXiv:2310.19736.
289
+ Shanshan Han, Qifan Zhang, Yuhang Yao, Weizhao Jin, Zhaozhuo Xu, and Chaoyang He. 2024. Llm multiagent systems: Challenges and open problems. arXiv preprint arXiv:2402.03578.
290
+ Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the math dataset. Preprint, arXiv:2103.03874.
291
+ Sirui Hong, Mingchen Zhuge, Jiaqi Chen, Xiawu Zheng, Yuheng Cheng, Ceyao Zhang, Jinlin Wang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, Chenyu Ran, Lingfeng Xiao, Chenglin Wu, and Jurgen Schmidhuber. 2024. Metagpt: Meta programming for a multi-agent collaborative framework. Preprint, arXiv:2308.00352.
292
+ Bin Lei, Yi Zhang, Shan Zuo, Ali Payani, and Caiwen Ding. 2024. Macm: Utilizing a multi-agent system for condition mining in solving complex mathematical problems. Preprint, arXiv:2404.04735.
293
+
294
+ Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. 2023. Camel: Communicative agents for "mind" exploration of large language model society. Preprint, arXiv:2303.17760.
295
+ Qingyao Li, Wei Xia, Kounianhua Du, Xinyi Dai, Ruiming Tang, Yasheng Wang, Yong Yu, and Weinan Zhang. 2024. Rethinkmcts: Refining erroneous thoughts in monte carlo tree search for code generation. Preprint, arXiv:2409.09584.
296
+ Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. Preprint, arXiv:2305.20050.
297
+ Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. 2024a. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437.
298
+ Zijun Liu, Yanzhe Zhang, Peng Li, Yang Liu, and Diyi Yang. 2024b. A dynamic llm-powered agent network for task-oriented agent collaboration. Preprint, arXiv:2310.02170.
299
+ Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. 2024. Improve mathematical reasoning in language models by automated process supervision. Preprint, arXiv:2406.06592.
300
+ Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-timescaling. Preprint, arXiv:2501.19393.
301
+ OpenAI,., Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, Aleksander Madry, Alex Baker-Whitcomb, Alex Beutel, Alex Borzunov, Alex Carney, Alex Chow, Alex Kirillov, Alex Nichol, Alex Paino, Alex Renzin, Alex Tachard Passos, Alexander Kirillov, Alexi Christakis, Alexis Conneau, Ali Kamali, Allan Jabri, Allison Moyer, Allison Tam, Amadou Crookes, Amin Tootoochian, Amin Tootoonchian, Ananya Kumar, Andrea Vallone, Andrej Karpathy, Andrew Braunstein, Andrew Cann, Andrew Codispoti, Andrew Galu, Andrew Kondrich, Andrew Tulloch, Andrey Mishchenko, Angela Baek, Angela Jiang, Antoine Pelisse, Antonia Woodford, Anuj Gosalia, Arka Dhar, Ashley Pantuliano, Avi Nayak, Avital Oliver, Barret Zoph, Behrooz Ghorbani, Ben Leimberger, Ben Rossen, Ben Sokolowsky, Ben Wang, Benjamin Zweig, Beth Hoover, Blake Samic, Bob McGrew, Bobby Spero, Bogo Giertler, Bowen Cheng, Brad Lightcap, Brandon Walkin, Brendan Quinn, Brian Guarraci, Brian Hsu, Bright Kellogg, Brydon Eastman, Camillo Lugaresi, Carroll Wainwright, Cary
302
+
303
+ Bassin, Cary Hudson, Casey Chu, Chad Nelson, Chak Li, Chan Jun Shern, Channing Conger, Charlotte Barette, Chelsea Voss, Chen Ding, Cheng Lu, Chong Zhang, Chris Beaumont, Chris Hallacy, Chris Koch, Christian Gibson, Christina Kim, Christine Choi, Christine McLeavey, Christopher Hesse, Claudia Fischer, Clemens Winter, Coley Czarnecki, Colin Jarvis, Colin Wei, Constantin Koumouzelis, Dane Sherburn, Daniel Kappler, Daniel Levin, Daniel Levy, David Carr, David Farhi, David Mely, David Robinson, David Sasaki, Denny Jin, Dev Valladares, Dimitris Tsipras, Doug Li, Duc Phong Nguyen, Duncan Findlay, Edede Oiwoh, Edmund Wong, Ehsan Asdar, Elizabeth Proehl, Elizabeth Yang, Eric Antonow, Eric Kramer, Eric Peterson, Eric Sigler, Eric Wallace, Eugene Brevdo, Evan Mays, Farzad Khorasani, Felipe Petroski Such, Filippo Raso, Francis Zhang, Fred von Lohmann, Freddie Sulit, Gabriel Goh, Gene Oden, Geoff Salmon, Giulio Starace, Greg Brockman, Hadi Salman, Haiming Bao, Haitang Hu, Hannah Wong, Haoyu Wang, Heather Schmidt, Heather Whitney, Heewoo Jun, Hendrik Kirchner, Henrique Ponde de Oliveira Pinto, Hongyu Ren, Huiwen Chang, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian O'Connell, Ian Osband, Ian Silber, Ian Sohl, Ibrahim Okuyucu, Ikai Lan, Ilya Kostrikov, Ilya Sutskever, Ingmar Kanitscheider, Ishaan Gulrajani, Jacob Coxon, Jacob Menick, Jakub Pachocki, James Aung, James Betker, James Crooks, James Lennon, Jamie Kiros, Jan Leike, Jane Park, Jason Kwon, Jason Phang, Jason Teplitz, Jason Wei, Jason Wolfe, Jay Chen, Jeff Harris, Jenia Varavva Jessica Gan Lee Jessica Shieh Ji Lin Jiahui Yu Jiayi Weng Jie Tang Jieqi Yu Joanne Jang Joaquin Quinonero Candela Joe Beutler Joe Landers Joel Parish Johannes Heidecke John Schulman Jonathan Lachman Jonathan McKay Jonathan Uesato Jonathan Ward Jong Wook KimJoost Huizinga Jordan Sitkin Jos Kraaijeveld Josh Gross Josh Kaplan Josh Snyder Joshua Achiam Joy Jiao Joyce Lee,Juntang Zhuang Justyn Harriman,Kai Fricke,Kai Hayashi Karan Singhal Katy 
ShiKavin Karthik Kayla Wood Kendra Rimbach Kenny Hsu Kenny Nguyen Keren Gu-Lemberg Kevin Button. Kevin Liu Kiel Howe Krithika Muthukumar Kyle Luther Lama Ahmad Larry Kai Lauren Itow Lauren Workman Leher Pathak Leo Chen Li JingLia Guy Liam Fedus Liang Zhou Lien Mamitsuka Lilian WengLindsay McCallumLindsey Held Long Ouyang Louis Feuvrier Lu Zhang Lukas Kondraciuk Lukasz Kaiser Luke Hewitt Luke Metz Lyric Doshi Mada Aflak Maddie Simens Madelaine Boyd Madeleine Thompson Marat Dukhan Mark Chen Mark Gray Mark Hudnall Marvin Zhang Marwan Aljubeh Mateusz Litwin Matthew Zeng Max Johnson Maya Shetty Mayank Gupta Meghan Shah Mehmet Yatbaz,Meng Jia Yang Mengchao Zhong Mia Glaese Mianna Chen Michael Janner Michael Lampe Michael Petrov Michael Wu Michele Wang Michelle Fradin Michelle Pokrass Miguel Castro Miguel Oom Temudo de Castro Mikhail Pavlov Miles Brundage Miles Wang Minal Khan Mira Murati Mo Bavarian Molly Lin Murat Yesildal,Nacho Soto Natalia Gimelshein Na
304
+
305
+ talie Cone, Natalie Staudacher, Natalie Summers, Natan LaFontaine, Neil Chowdhury, Nick Ryder, Nick Stathas, Nick Turley, Nik Tezak, Nik Felix, Nithanth Kudige, Nitish Keskar, Noah Deutsch, Noel Bundick, Nora Puckett, Ofir Nachum, Ola Okelola, Oleg Boiko, Oleg Murk, Oliver Jaffe, Olivia Watkins, Olivier Godement, Owen Campbell-Moore, Patrick Chao, Paul McMillan, Pavel Belov, Peng Su, Peter Bak, Peter Bakkum, Peter Deng, Peter Dolan, Peter Hoeschele, Peter Welinder, Phil Tillet, Philip Pronin, Philippe Tillet, Prafulla Dhariwal, Qiming Yuan, Rachel Dias, Rachel Lim, Rahul Arora, Rajan Troll, Randall Lin, Rapha Gontijo Lopes, Raul Puri, Reah Miyara, Reimar Leike, Renaud Gaubert, Reza Zamani, Ricky Wang, Rob Donnelly, Rob Honsby, Rocky Smith, Rohan Sahai, Rohit Ramchandani, Romain Huet, Rory Carmichael, Rowan Zellers, Roy Chen, Ruby Chen, Ruslan Nigmatullin, Ryan Cheu, Saachi Jain, Sam Altman, Sam Schoenholz, Sam Toizer, Samuel Miserendino, Sandhini Agarwal, Sara Culver, Scott Ethersmith, Scott Gray, Sean Grove, Sean Metzger, Shamez Hermani, Shantanu Jain, Shengjia Zhao, Sherwin Wu, Shino Jomoto, Shirong Wu, Shuaiqi, Xia, Sonia Phene, Spencer Papay, Srinivas Narayanan, Steve Coffey, Steve Lee, Stewart Hall, Suchir Balaji Tal Broda Tal Stramer Tao Xu Tarun Gogineni Taya Christianson Ted Sanders Tejal Patwardhan Thomas Cunninghamman Thomas Degry Thomas Dimson Thomas Raoux Thomas Shadwell, Tianhao Zheng Todd Underwood,Todor Markov Toki Sherbakov Tom Rubin Tom Stasi Tomer Kaftan, Tristan Heywood Troy Peterson Tyce Walters,Tyna Eloundou,Velice Qi Veit Moeller Vinnie Monaco,Vishal Kuo,Vlad Fomenko,Wayne ChangWeiyi ZhengWenda ZhouWesam Manassra Will Sheu Wojciech Zaremba,Yash Patil Yilei Qian Yongjik KimYoulong ChengYu ZhangYuchen HeYuchen ZhangYujia JinYunxing Dai and Yury Malkov. 2024a. Gpt-4o system card. Preprint arXiv:2410.21276.
306
+
307
+ OpenAI,.; Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minajiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such
308
+
309
+ Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quiñonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kauper, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. 
Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal Santiago Hernandez, Sasha Baker Scott McKinney Scottie Yan Shengjia Zhao Shengli Hu Shibani Santurkar, Shraman Ray Chaudhuri Shuyuan Zhang Siyuan Fu Spencer Papay Steph Lin Suchir Balaji Suvansh Sanjeev Szymon Sidor Tal Broda Aidan Clark Tao Wang Taylor Gordon Ted Sanders Tejal Patwardhan Thibault Sottiaux Thomas Degry Thomas Dimson Tianhao Zheng Timur Garipov Tom Stasi Trapit Bansal Trevor Creech Troy Peterson Tyna Eloundou Valerie Qi,Vineet Kosaraju Vinnie Monaco Vitchyr Pong Vlad Fomenko Weiyi ZhengWenda ZhouWes McCabe Wojciech ZarembaYann Dubois Yinghai Lu Yining Chen Young ChaYu BaiYuchen HeYuchen Zhang Yunyun WangZheng Shao and Zhuohan Li. 2024b. Openai o1 system card.Preprint arXiv:2412.16720.
310
+
311
+ Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. 2022.
312
+
313
+ Training language models to follow instructions with human feedback. Preprint, arXiv:2203.02155.
314
+
315
+ Chen Qian, Wei Liu, Hongzhang Liu, Nuo Chen, Yufan Dang, Jiahao Li, Cheng Yang, Weize Chen, Yusheng Su, Xin Cong, Juyuan Xu, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024a. Chatdev: Communicative agents for software development. Preprint, arXiv:2307.07924.
316
+
317
+ Chen Qian, Zihao Xie, Yifei Wang, Wei Liu, Yufan Dang, Zhuoyun Du, Weize Chen, Cheng Yang, Zhiyuan Liu, and Maosong Sun. 2024b. Scaling large-language-model-based multi-agent collaboration. Preprint, arXiv:2406.07155.
318
+
319
+ Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. 2024. O1 replication journey: A strategic progress report - part 1. Preprint, arXiv:2410.18982.
320
+
321
+ David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. Gpqa: A graduate-level google-proof q&a benchmark. Preprint, arXiv:2311.12022.
322
+
323
+ Joar Skalse, Nikolaus H. R. Howe, Dmitrii Krasheninnikov, and David Krueger. 2022. Defining and characterizing reward hacking. Preprint, arXiv:2209.13085.
324
+
325
+ Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. Preprint, arXiv:2408.03314.
326
+
327
+ Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530.
328
+
329
+ Khanh-Tung Tran, Dung Dao, Minh-Duong Nguyen, Quoc-Viet Pham, Barry O'Sullivan, and Hoang D. Nguyen. 2025. Multi-agent collaboration mechanisms: A survey of llms. Preprint, arXiv:2501.06322.
330
+
331
+ Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. 2022. Solving math word problems with process- and outcome-based feedback. Preprint, arXiv:2211.14275.
332
+
333
+ Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. 2024a. Mixture-of-agents enhances large language model capabilities. Preprint, arXiv:2406.04692.
334
+
335
+ Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, Wayne Xin Zhao, Zhewei Wei, and Jirong Wen. 2024b. A survey on large language
336
+
337
+ model based autonomous agents. Frontiers of Computer Science, 18(6).
338
+ Peiyi Wang, Lei Li, Zhihong Shao, R. X. Xu, Damai Dai, Yifei Li, Deli Chen, Y. Wu, and Zhifang Sui. 2024c. Math-shepherd: Verify and reinforce llms step-by-step without human annotations. Preprint, arXiv:2312.08935.
339
+ Shuhe Wang, Shengyu Zhang, Jie Zhang, Runyi Hu, Xiaoya Li, Tianwei Zhang, Jiwei Li, Fei Wu, Guoyin Wang, and Eduard Hovy. 2024d. Reinforcement learning enhanced llms: A survey. Preprint, arXiv:2412.10400.
340
+ Tianlong Wang, Junzhe Chen, Xueting Han, and Jing Bai. 2024e. Cpl: Critical plan step learning boosts llm generalization in reasoning tasks. Preprint, arXiv:2409.08642.
341
+ Xiaoxuan Wang, Ziniu Hu, Pan Lu, Yanqiao Zhu, Jieyu Zhang, Satyen Subramaniam, Arjun R. Loomba, Shichang Zhang, Yizhou Sun, and Wei Wang. 2024f. Scibench: Evaluating college-level scientific problem-solving abilities of large language models. Preprint, arXiv:2307.10635.
342
+ Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023. Self-consistency improves chain of thought reasoning in language models. Preprint, arXiv:2203.11171.
343
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. 2023. Chain-of-thought prompting elicits reasoning in large language models. Preprint, arXiv:2201.11903.
344
+ Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu, Li Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, Ahmed Hassan Awadallah, Ryen W White, Doug Burger, and Chi Wang. 2023. Autogen: Enabling next-gen llm applications via multi-agent conversation. Preprint, arXiv:2308.08155.
345
+ Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, Xiao Wang, Rui Zheng, Tao Ji, Xiaowei Shi, Yitao Zhai, Rongxiang Weng, Jingang Wang, Xunliang Cai, Tao Gui, Zuxuan Wu, Qi Zhang, Xipeng Qiu, Xuanjing Huang, and YuGang Jiang. 2024. Enhancing llm reasoning via critique models with test-time and training-time supervision. Preprint, arXiv:2411.16579.
346
+ Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. 2025. Towards large reasoning models: A survey of reinforced reasoning with large language models. Preprint, arXiv:2501.09686.
347
+
348
+ An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.
349
+ Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2024. Rest-mcts*: Llm self-training via process reward guided tree search. Preprint, arXiv:2406.03816.
350
+ Guibin Zhang, Yanwei Yue, Xiangguo Sun, Guancheng Wan, Miao Yu, Junfeng Fang, Kun Wang, Tianlong Chen, and Dawei Cheng. 2025a. G-designer: Architecting multi-agent communication topologies via graph neural networks. Preprint, arXiv:2410.11782.
351
+ Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. 2025b. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301.
352
+ Mingchen Zhuge, Wenyi Wang, Louis Kirsch, Francesco Faccio, Dmitrii Khizbullin, and Jürgen Schmidhuber. 2024. Language agents as optimizable graphs. Preprint, arXiv:2402.16823.
353
+
354
+ # A Related work on LLM Reasoning Policy
355
+
356
+ Reward model is usually combined with different reasoning policies to enhance its effect such as majority voting (Wang et al., 2023), Chain of Thought (COT) (Wei et al., 2023) and Monte Carlo Tree Search (MCTS) (Browne et al., 2012). OmegaPRM (Luo et al., 2024) enhances reasoning with a divide-and-conquer MCTS strategy. ReST-MCTS (Zhang et al., 2024) refines reasoning traces using inferred stepwise rewards. RethinkMCTS (Li et al., 2024) improves code generation by leveraging execution feedback. In contrast, Critical Plan Step Learning (Wang et al., 2024e) employs hierarchical MCTS to generalize across reasoning tasks. Additionally, AlphaMath (Chen et al., 2024a) and TS-LLM (Feng et al., 2024) enhance reasoning by incorporating a value model and iterative tree search, with TS-LLM further leveraging an AlphaZero-like framework and policy distillation.
357
+
358
+ # B Model Performance
359
+
360
+ ![](images/d7a3dcc6d34ac5103f9a7aa6a2f943696a5432b6f67dd78f1388c5b22e89bfb0.jpg)
361
+ Figure 5: Performance of different models on our selected Math and SciBench dataset subproblems.
362
+
363
+ # C Case Study
364
+
365
+ # Complex Task Synthesis Case Study
366
+
367
+ Original Question:
368
+
369
+ A model for the surface area of a human body is given by
370
+
371
+ $$
372
+ S = 0.1091 w^{0.425} h^{0.725}.
373
+ $$
374
+
375
+ When ultraviolet radiation of wavelength UNK_0 (where UNK_0 = Answer[2] + 56.10 nm) strikes the skin, ..., a muscle fiber contracts by 3.5 cm and lifts a weight, assuming Hooke's law $F = -kx$ with $k = \mathrm{UNK\_1} = \mathrm{Answer}[0] + 736.00$ ; finally, please calculate
376
+
377
+ $$
378
+ \operatorname{Answer}[0] \times \operatorname{Answer}[1] \times \operatorname{Answer}[2]
379
+ $$
380
+
381
+ and conclude: "The answer is therefore $\boxed{ANSWER}$."
382
+
383
+ # Decomposed Task Graph:
384
+
385
+ - Task 1 (no deps): Compute $S$ , record as Answer[2].
386
+ - Task 2 (dep: 1): Set UNK_0 = Answer[2] + 56.10, compute UV result, record as Answer[0].
387
+
388
+ - Task 3 (dep: 2): Set UNK_1 = Answer[0] + 736.00, compute work via Hooke's law, record as Answer[1].
389
+ - Task 4 (deps: 1,2,3): Compute the product Answer[0]·Answer[1]·Answer[2] and box the result.
390
+
391
+ # Agent Routing:
392
+
393
+ - Task 1 (Calculus) $\rightarrow$ gemini-2.0-flash-exp_GeometryExpert
394
+ - Task 2 (Matter) $\rightarrow$ gpt-4o_ElectromagnetismExpert
395
+ - Task 3 (Thermodynamics) $\rightarrow$ qwen2.5-max_Thermodynamics&OpticsExpert
396
+ - Task 4 (Aggregation) $\rightarrow$ gemini-2.0-flash-exp_AlgebraExpert
397
+
398
+ # D Prompt
399
+
400
+ # Prompt of Agents in the Pool
401
+
402
+ [gpt-4o_1]
403
+
404
+ model = gpt-4o
405
+
406
+ role = MechanicsExpert
407
+
408
+ prompt = You are a highly knowledgeable mechanics expert in a multi-agent system. You are given
409
+
410
+ $\hookrightarrow$ a sub-task related to classical mechanics, statics, dynamics, kinematics, or fluid
411
+ $\hookrightarrow$ mechanics. First, read and understand the previous questions and answers from other
412
+ $\hookrightarrow$ agents. Identify the variables that have already been solved and ensure consistency with
413
+ $\hookrightarrow$ their results. Then, systematically break down your sub-task, applying relevant physical
414
+ $\hookrightarrow$ laws such as Newton's laws, conservation principles, or motion equations. Justify your
415
+ $\hookrightarrow$ reasoning, verify unit consistency, and cross-check with previous agent outputs before
416
+ $\hookrightarrow$ providing a well-explained solution.
417
+
418
+ [gpt-4o_2]
419
+
420
+ model = gpt-4o
421
+
422
+ role = ElectromagnetismExpert
423
+
424
+ prompt = You are an expert in electromagnetism within a multi-agent system. You are assigned a $\hookrightarrow$ sub-task related to electric fields, magnetic fields, circuit analysis, or electromagnetic
425
+
426
+ $\hookrightarrow$ waves. First, read and understand the previous questions and answers from other agents,
427
+ $\hookrightarrow$ extract solved variables, and ensure logical consistency. Apply fundamental principles
428
+ $\hookrightarrow$ such as Maxwell's equations, Gauss's law, or Faraday's law to solve your sub-task
429
+ $\hookrightarrow$ systematically. Clearly outline your steps, justify the assumptions, and verify that your
430
+ $\hookrightarrow$ solution aligns with previous agents' work. If discrepancies arise, propose possible
431
+ $\hookrightarrow$ resolutions.
432
+
433
+ [gpt-4o_3]
434
+
435
+ model = gpt-4o
436
+
437
+ role = Thermodynamics&OpticsExpert
438
+
439
+ prompt = You are an expert in thermodynamics and optics in a multi-agent system. Your role is $\hookrightarrow$ to solve a specific sub-task while ensuring coherence with previous agents' results.
440
+
441
+ $\hookrightarrow$ First, read and understand the previous discussions, extract solved variables, and align
442
+ $\hookrightarrow$ your approach with existing solutions. Apply principles such as the first and second laws
443
+ $\hookrightarrow$ of thermodynamics, heat transfer models, or optical laws (e.g., Snell's law, diffraction,
444
+ $\hookrightarrow$ and wave optics). Provide a detailed step-by-step solution, justify calculations, and
445
+ $\hookrightarrow$ validate numerical consistency with prior agent outputs. If uncertainties arise, suggest
446
+ $\hookrightarrow$ possible clarifications.
447
+
448
+ [gpt-4o_4]
449
+
450
+ model = gpt-4o
451
+
452
+ role = InorganicChemistryExpert
453
+
454
+ prompt = You are an inorganic chemistry expert operating in a multi-agent system. Your sub-task
455
+
456
+ $\hookrightarrow$ may involve chemical bonding, periodic trends, reaction mechanisms, or coordination
457
+ $\hookrightarrow$ chemistry. Carefully review the previous questions and answers, identify already
458
+ $\hookrightarrow$ determined variables, and ensure consistency with past calculations. Apply relevant
459
+ $\hookrightarrow$ chemical principles to analyze and solve your assigned problem step by step. Provide
460
+ $\hookrightarrow$ balanced chemical equations, validate reaction feasibility, and explain your reasoning
461
+ $\hookrightarrow$ clearly. If your results depend on prior agents' outputs, verify their correctness and
462
+ $\hookrightarrow$ suggest refinements if necessary.
463
+
464
+ [gpt-4o_5]
465
+
466
+ model = gpt-4o
467
+
468
+ role = OrganicChemistryExpert
469
+
470
+ prompt = You are an organic chemistry expert in a multi-agent system, responsible for solving a $\hookrightarrow$ sub-task related to molecular structures, reaction mechanisms, or synthetic pathways.
471
+
472
+ $\hookrightarrow$ First, review previous discussions, extract key solved variables, and ensure consistency
473
+ $\hookrightarrow$ with prior agent responses. Then, apply organic chemistry principles such as resonance
474
+ $\hookrightarrow$ effects, nucleophilic-electrophilic interactions, and reaction kinetics to derive a
475
+ $\hookrightarrow$ precise solution. Provide clear mechanistic explanations, reaction diagrams if necessary,
476
+ $\hookrightarrow$ and cross-check results to maintain logical coherence within the system.
477
+
478
+ Figure 6: The prompt of agents in the pool.
479
+
480
+ # Prompt of the Task Plan Generator
481
+
482
+ 1 1
483
+
484
+ You are an AI assistant specialized in generating structured prompts for domain-specific
485
+
486
+ $\hookrightarrow$ experts in a multi-agent system.
487
+
488
+ **Task:**
489
+
490
+ Given a subquestion, analyze its domain, required expertise, and problem complexity. Then,
491
+ $\hookrightarrow$ generate a structured prompt that precisely describes the expert's role in solving the
492
+ $\hookrightarrow$ problem. The generated prompt will be used for vector-based similarity matching to select
493
+ $\hookrightarrow$ the most appropriate agent from an agent pool.
494
+
495
+ **Prompt Format:**
496
+
497
+ "You are a [Expert Type], highly skilled in [Specific Knowledge Areas]. Your task is to analyze
498
+ $\hookrightarrow$ the problem by first reviewing previously solved variables and solutions from other agents
499
+ $\hookrightarrow$ in the multi-agent system. Apply domain-specific knowledge to reason rigorously and
500
+ $\hookrightarrow$ provide a well-structured, logically sound answer. If calculations are required, show all
501
+ $\hookrightarrow$ steps. If problem decomposition is needed, outline a systematic approach. Ensure
502
+ $\hookrightarrow$ consistency with previous solutions in the multi-agent system and resolve any
503
+ $\hookrightarrow$ discrepancies when necessary. Your role is to assist in solving complex reasoning problems
504
+ $\hookrightarrow$ with precision and alignment with the broader system."
505
+
506
+ **Instructions for Prompt Generation:**
507
+
508
+ 1. **Expert Type Selection**: Identify the most relevant expert type (e.g., MechanicsExpert,
509
+ $\hookrightarrow$ AlgebraExpert, ThermodynamicsExpert).
510
+ 2. **Specific Knowledge Areas**: Define the precise knowledge fields required to solve the
511
+ $\hookrightarrow$ problem.
512
+ 3. **Problem Scope & Complexity**: Determine whether the problem requires deep theoretical
513
+ $\hookrightarrow$ knowledge, numerical computation, or practical modeling.
514
+
515
+ **Output:**
516
+
517
+ Provide only the generated prompt without additional explanations."""
518
+
519
+ Figure 7: The prompt of the task plan generator.
520
+
521
+ # E Agent Selection Visualization
522
+
523
+ The agent selection distribution during the testing phase of Scibench-MAS-Easy reveals that Gemini-2.0-Flash-Exp and Qwen2.5-Max were the most frequently selected models after training.
524
+
525
+ ![](images/2079289bed5ec33d59b19adc4055d82ea544d272ae4e6e95ea217b176a6fd042.jpg)
526
+ Agent Selection Distribution
527
+
528
+ ![](images/25c3766df553a5379c5f146e0b7115b136e426548df84664dcbaeb8af496d462.jpg)
529
+ Figure 8: Testing stage on the easy-level tasks in Scibench-MAS.
530
+ Figure 9: Testing stage on the hard-level tasks in Scibench-MAS.
531
+
532
+ # F Hyperparameters
533
+
534
+ During both training and testing, a set of weighted factors and constraints guide agent selection, allowing for dynamic adjustments. Specifically, similarity_weight = 0.6 regulates the influence of subproblem-agent similarity, reputation_weight = 1.0 balances agent selection based on past performance, and cost_weight = 1.0 accounts for computational overhead. A THRESHOLD = 0.6 establishes
535
+
536
the similarity cutoff for specialized handling of certain subproblems, while EXPLORATION_CONST = 0.3 encourages periodic assignments to underutilized agents. During testing, hyperparameters can be adjusted to fine-tune the selection process—modifying similarity_weight and THRESHOLD controls the search scope, adjusting reputation_weight increases the weight of agent reputation in scoring, and tweaking cost_weight alters the impact of computational overhead, enabling a flexible trade-off between efficiency and performance. Finally, TOP_K = 3 restricts the number of candidate agents per subproblem, balancing exploration and efficiency in the selection process.
537
+
538
+ ![](images/885c9a668c9b04bafa6b099d97dd126371b468043c18114d1f7bfe023809210b.jpg)
539
+ Figure 10: Testing stage on the medium-level tasks in Scibench-MAS using reputation_weight 1.
540
+
541
+ ![](images/3dbda07e8ee4e708d2d9b185f11d34f29e3337aac7b93518a534f5bde46b4bbd.jpg)
542
+ Figure 11: Testing stage on the medium-level tasks in Scibench-MAS using reputation_weight 2.
543
+
544
+ ![](images/85a95b16771c4beea475aa4b173688c4f59e9cec34c4d06bfa83e25d2a28bde6.jpg)
545
+ Agent Answer Distribution
546
+ Figure 12: Testing stage on the medium-level tasks in Scibench-MAS without training.
547
+
548
+ Token Efficiency Table 1 also compares the average number of tokens consumed per task. ReSo maintains a relatively moderate token usage, which is significantly lower than certain baselines like DyLAN (14.6k vs 64.1k, 20.7k vs 77.8k). This balance between performance and computational cost underlines ReSo's practical efficiency in real-world, large-scale scenarios.
549
+
550
+ # G Reward Signal
551
+
552
+ We investigate the impact of different reward signals on system optimization, considering three approaches: (1) Rule-based, which provides strictly accurate, predefined evaluations for sub-task solutions; (2) General Reward Model, using Qwen2.5-Math-PRM-7B as a reward function without task-specific fine-tuning; and (3) Fine-tuned Reward Model, i.e., our CRM proposed in 3.3.3. Figure 3(c) presents the results of training our MAS under these reward schemes on the SciBench-MAS dataset. The rule-based reward yields the best results, confirming the importance of precise reward signals. Besides, our CRM brings a slight improvement compared to the original Qwen2.5-Math-PRM-7B model. We also observe an instance of reward hacking when using the Qwen reward model: specifically, Qwen2.5-Max tends to receive inflated scores when acting as the reasoning agent. As a result, during inference, the MAS disproportionately selects Qwen2.5-Max to handle sub-tasks, even in cases where it does not necessarily produce the best solutions.
553
+
554
+ # H CRM,ORM,PRM
555
+
556
+ Our Cooperative Reward Model (CRM) is inspired by OpenAI's PRM, but it has been extended and adapted to the multi-agent system (MAS) setting. In our complex tasks, multiple sub-tasks exist, and the CRM scores each sub-task's response based on the outputs from prior agents. While conceptually similar to PRM—where each sub-task can be seen as a step—PRM cannot be directly applied to our MAS setting due to fundamental structural differences.
557
+
558
+ # I Comparison with Chain-of-Thought (CoT) Methods
559
+
560
+ We would like to clarify that the prompts used in our single-model evaluation experiments already support step-by-step reasoning, thus reflecting Chain-of-Thought (CoT) style outputs. These models are capable of multi-step reasoning and demonstrate CoT-style thinking when tackling complex problems. However, as demonstrated in our results, these CoT-style single-model approaches perform poorly on tasks with high complexity and combinatorial reasoning. As task difficulty increases, even the strongest single LLMs exhibit a significant drop in accuracy—approaching $0\%$ at the highest difficulty level. This clearly indicates that "step-by-step thinking" alone is insufficient for solving the kinds of deep combinatorial reasoning tasks we designed. Our proposed method, ReSo, substantially outperforms these CoT-style baselines. In addition, ReSo introduces structural and functional advantages over traditional CoT methods. CoT follows a linear reasoning path, whereas ReSo constructs a task graph composed of multiple subtasks, each solvable independently by different expert agents. This allows for horizontal task expansion and fine-grained skill decomposition. A key limitation of CoT is its dependence on a single model's context length, reasoning capabilities, and domain knowledge. ReSo addresses these limitations by decomposing tasks, dynamically routing them, assigning subtasks to the most appropriate agents, and using reward mechanisms to drive learning.
561
+
562
+ # J Qwen Model Dependence
563
+
564
+ We would like to clarify that the performance gains observed in ReSo primarily stem from the task decomposition and multi-agent cooperation architecture, rather than solely from a stronger base model. Our approach consists of two stages. The first stage uses an LLM to decompose the task, and the second stage selects the most suitable agents to handle the subproblems. To further demonstrate the effectiveness of our framework, we conducted a new experiment. Even when Qwen-sfted is used for task decomposition, single-agent approaches still fail. This emphasizes that cooperation among agents is necessary. Additionally, our fine-tuned Qwen-7B model performs comparably to GPT-4o for task decomposition, but it is only when subtasks are assigned to specialized agents that the system achieves significant improvements in performance.
565
+
566
+ Table 3: Qwen model dependence
567
+
568
+ <table><tr><td>model</td><td>Easy</td><td>Medium</td><td>Hard</td></tr><tr><td>Qwen-sfted + (no ReSo) single agent</td><td>27.5</td><td>5.6</td><td>4.5</td></tr><tr><td>GPT-4o + ReSo</td><td>71.4</td><td>43.8</td><td>34.8</td></tr><tr><td>Qwen-sfted + ReSo</td><td>79.1</td><td>56.2</td><td>33.7</td></tr></table>
569
+
570
+ # K Computational Complexity and Runtime
571
+
572
+ Inference Parallelism. Independent DAG subnodes can be executed in parallel, mitigating runtime overhead. Despite a higher token usage, ReSo achieves greater accuracy gains, justifying the cost:
573
+
574
+ Table 4: Token usage and runtime comparison
575
+
576
+ <table><tr><td>Method</td><td>Tokens</td><td>Time (h)</td></tr><tr><td>MetaGPT</td><td>16.1 k</td><td>3.2</td></tr><tr><td>DyLAN</td><td>64.1 k</td><td>8.0</td></tr><tr><td>GPTSwarm</td><td>14.9 k</td><td>1.3</td></tr><tr><td>GDesigner</td><td>16.9 k</td><td>4.0</td></tr><tr><td>ReSo</td><td>25.9 k</td><td>4.1 (3 training + 1.1 testing)</td></tr></table>
data/2025/2503_02xxx/2503.02390/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:620174377803c02192d74bc06bde980d794b3e08c8b91069908c05799816fb8c
3
+ size 543504
data/2025/2503_02xxx/2503.02390/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f20e376ff0ce42513d64383fd2cb81822b57e1b06938048fdbbf3cc8924d6bd
3
+ size 6656441
data/2025/2503_02xxx/2503.02407/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02407/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23b133c7d5c3cb23c0ebdcbf8e14aeac0681f0fe424a5f8359a20b3ee8d718eb
3
+ size 1429353
data/2025/2503_02xxx/2503.02407/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3433dbcc404d376a4adeaffa74208be5de42a1ec7ca5e67b0d74fc1740f2e4a
3
+ size 35279101
data/2025/2503_02xxx/2503.02424/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02424/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5305f0ec99e0bdeeb96b008b7cf79f389cfed0aeeac16057192ecd7cc5515ec4
3
+ size 4205671
data/2025/2503_02xxx/2503.02424/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78d7f36e9da142ea1bb1509c26ca95d74e1c03353a4b0fdef7c634547dd0c137
3
+ size 1856655
data/2025/2503_02xxx/2503.02445/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02445/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75321e431ebfbf462674d3475f036a14c5c7eeaf66df0e4d6378f52c2c80b0e9
3
+ size 1850867
data/2025/2503_02xxx/2503.02445/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_02xxx/2503.02450/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ce43e0b930129c0baabbce010f113ab76084fc9cb96b7e0c3b633bb1fa35eeb
3
+ size 704127