Chelsea707 committed on
Commit 2b1d317 · verified · 1 Parent(s): a0872fd

MinerU Batch 4eac7c8c-d60b-45af-9b7d-c7fd538645b4 (Part 7/8)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_content_list.json +0 -0
  3. data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_model.json +0 -0
  4. data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_origin.pdf +3 -0
  5. data/2025/2504_13xxx/2504.13367/full.md +496 -0
  6. data/2025/2504_13xxx/2504.13367/images/0866efb5f2950f68686baa975f2bd71c9f7d2b8a511fd667d849d97f15d4342b.jpg +3 -0
  7. data/2025/2504_13xxx/2504.13367/images/13527b3c58070716414b69e0269d27b00d185a55720e3870c1e52b0bb124ad8e.jpg +3 -0
  8. data/2025/2504_13xxx/2504.13367/images/1fdea51374a5af38e976d087dd52dada79a30f34c4e95db72a81d13f280ac896.jpg +3 -0
  9. data/2025/2504_13xxx/2504.13367/images/30724602ecef27f0168d78aa0666aa13581e228b0798a45453395a9c78407762.jpg +3 -0
  10. data/2025/2504_13xxx/2504.13367/images/352e4f94f22f607ff8f695359842e116f03471cb231b1b06f92cb664ff578a85.jpg +3 -0
  11. data/2025/2504_13xxx/2504.13367/images/4a071855232fa6e57dbb04e6030e3559d5f9ce16318980b3f25950ab21835b1c.jpg +3 -0
  12. data/2025/2504_13xxx/2504.13367/images/4b0fcdcdff33836130a2a935b866fde64fff26901df853096f493aa05ca757b6.jpg +3 -0
  13. data/2025/2504_13xxx/2504.13367/images/538aa8e65541ed0336c847201d371c2296709a4338ae65ae25bf71ae082642dc.jpg +3 -0
  14. data/2025/2504_13xxx/2504.13367/images/5432888fe6ad055a4a2f36e42336210e300e160a20f797c9a35f05da657395b4.jpg +3 -0
  15. data/2025/2504_13xxx/2504.13367/images/5627bb5640e02f7267b21db83dc4183b1113146a8b00c31d9b9a477cafe2a540.jpg +3 -0
  16. data/2025/2504_13xxx/2504.13367/images/57762d1e60e652fd131f3e09e77d78a77ae5436d64a718769ad214c606cb7b17.jpg +3 -0
  17. data/2025/2504_13xxx/2504.13367/images/581acd48568aa80c096caa667172e0fe4c3edfd17e5ec1c57c1612fbadf7a882.jpg +3 -0
  18. data/2025/2504_13xxx/2504.13367/images/59286ebd295a2115c6db701e88015e6a75246117891f5cdd7adecace969a81f0.jpg +3 -0
  19. data/2025/2504_13xxx/2504.13367/images/5a035e480b4f48c7dedaadcca1309d1e06a7bde2d6b26423a9e2ecd7eb4a2adb.jpg +3 -0
  20. data/2025/2504_13xxx/2504.13367/images/6700178cb0c9508c948988b611b46ce8b0c8317c142f6d52afd4180f3a6d8158.jpg +3 -0
  21. data/2025/2504_13xxx/2504.13367/images/682c4a69257fd7dd81c194606163169380868bc470ed364ffdf64c1588251b44.jpg +3 -0
  22. data/2025/2504_13xxx/2504.13367/images/72e35b4c40e40737d9860dacffe653edfe5e70af9b416602ff380f5cb1ee5ca4.jpg +3 -0
  23. data/2025/2504_13xxx/2504.13367/images/759e54dcb199c1868fd8ab257be18fc25efe607549f01a548c1499fa91e96d72.jpg +3 -0
  24. data/2025/2504_13xxx/2504.13367/images/811d4bc555cff8efb45585028fa7f9cb6319de931fd55724b0e10f068a0e20f7.jpg +3 -0
  25. data/2025/2504_13xxx/2504.13367/images/87364e66d83fbfd29ef9111af5d78b99b22d3af1bd107989f236b69456627d79.jpg +3 -0
  26. data/2025/2504_13xxx/2504.13367/images/89899d90f80f980839663034641a61668a0142bcfe1e2542c837d3074ea4dd84.jpg +3 -0
  27. data/2025/2504_13xxx/2504.13367/images/8e21f1e860839d4ea22a7075d2360b946e6269ff7810d2faf192f17d790a918f.jpg +3 -0
  28. data/2025/2504_13xxx/2504.13367/images/90e3ee2c43c84fd9252311bc8fc314f8fe11b34af70ffe25a6af3e71daa9c87c.jpg +3 -0
  29. data/2025/2504_13xxx/2504.13367/images/92e86fd9ceb7f94163041bf0cb974852de5485b8ade891ce3d1a7a653157e62a.jpg +3 -0
  30. data/2025/2504_13xxx/2504.13367/images/99165ac369c58d493982eab1ef883be89d4dfbe8888c47c5c9d898fe2c7be8f4.jpg +3 -0
  31. data/2025/2504_13xxx/2504.13367/images/a069990096b936224abe029895e3f13e9a6b77ab66aefc1078003d0019c494be.jpg +3 -0
  32. data/2025/2504_13xxx/2504.13367/images/a0e2646633e86a7c143335f4239d06298e073b34198d636c80a6bebb400d4c1e.jpg +3 -0
  33. data/2025/2504_13xxx/2504.13367/images/a39233c56e1bc88f704df64f0f6df4fed29ff764eb0ee298d3320f9a485427ca.jpg +3 -0
  34. data/2025/2504_13xxx/2504.13367/images/b7a12f3ebd5be5d280f2d282c420536095e032f04755419c808ae068d4b326c8.jpg +3 -0
  35. data/2025/2504_13xxx/2504.13367/images/c3d399dab9ecdfa2681a845b936a04f943ab15aa7fbd7fcbbc3b517f8c1c318e.jpg +3 -0
  36. data/2025/2504_13xxx/2504.13367/images/c71bfc7f709c37fe7ea407702dfdf8bf2978a601cc473853b26a0955de2d0629.jpg +3 -0
  37. data/2025/2504_13xxx/2504.13367/images/d7f678edad861cb21d30f22416b08ac0469573f0fc0e10be631d47bf4e7d63a4.jpg +3 -0
  38. data/2025/2504_13xxx/2504.13367/images/ddca224efe50d1837f1143603cc77049dea6f0e73de6b9b754b90c3fe2772c4b.jpg +3 -0
  39. data/2025/2504_13xxx/2504.13367/images/dee4ed60354ef72a6680045f881545b825ea445cb6273f9f580a4f803fb4fd33.jpg +3 -0
  40. data/2025/2504_13xxx/2504.13367/images/df13ded6d9154f47a91496e336a31a618bb28449dbf7c3458660c781ce2b3cbb.jpg +3 -0
  41. data/2025/2504_13xxx/2504.13367/images/df96533c806e323da66fec49e0b56d5b2dace76596c95829ff5e4090b9f55a9c.jpg +3 -0
  42. data/2025/2504_13xxx/2504.13367/images/dfdb6af3d2ca6c4f8f6bfc981c5b902e3a8630f58dc77711a06b10546d9fe515.jpg +3 -0
  43. data/2025/2504_13xxx/2504.13367/images/e1ba5e633e285ccab04d1c816d0cb6775a0ca8921c5a15bbdc4c91e69ae2d042.jpg +3 -0
  44. data/2025/2504_13xxx/2504.13367/images/e1d81fb4d67977d5ae0b112741cee7bd473e8b3fee75518a8801716d043709f9.jpg +3 -0
  45. data/2025/2504_13xxx/2504.13367/images/f3af9c4616ec29961937b3d6f7fac9ec81c7c1da8057695da4898fe6d0ee3661.jpg +3 -0
  46. data/2025/2504_13xxx/2504.13367/images/fb9b1849b069a7edf1d21ec778b6979d82f1d253fbd649b31376d019aad7e044.jpg +3 -0
  47. data/2025/2504_13xxx/2504.13367/images/fc16d8aaca46c41ee51cad243c325e51becff01a305939429711c020680e223b.jpg +3 -0
  48. data/2025/2504_13xxx/2504.13367/images/fc31f37b8bec8b05e32f03990a4b59591c248971304d9e6cc93b1bb1a73fe0c5.jpg +3 -0
  49. data/2025/2504_13xxx/2504.13367/layout.json +0 -0
  50. data/2025/2504_13xxx/2504.13406/2cedbc56-01e1-47be-9173-a7dd756bcc1a_content_list.json +1873 -0
.gitattributes CHANGED
@@ -1061,3 +1061,11 @@ data/2025/2504_14xxx/2504.14064/a0816722-2873-4cbb-a0de-6da504833391_origin.pdf
  data/2025/2504_14xxx/2504.14071/41528a47-0d16-4418-9970-d7f41254b176_origin.pdf filter=lfs diff=lfs merge=lfs -text
  data/2025/2504_14xxx/2504.14120/998614dc-cc02-4ff3-a8f5-01a05e7aa39d_origin.pdf filter=lfs diff=lfs merge=lfs -text
  data/2025/2504_14xxx/2504.14122/7601426d-f0db-4083-ba0b-a4d9c659cf89_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13406/2cedbc56-01e1-47be-9173-a7dd756bcc1a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13592/01161709-a55a-4c75-965a-7a7abcaf1d27_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13626/b7232481-6564-415c-866d-936e5cf86601_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13650/709aba9f-dad4-48b4-a551-3edbed08a781_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13707/3ceae00c-ae80-4a3a-854a-18a112f05be2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_13xxx/2504.13837/64f2b5fa-c253-4510-b5d2-ad303831a936_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ data/2025/2504_16xxx/2504.16113/1e364459-1cd0-4c1a-a05b-130dcb3e3873_origin.pdf filter=lfs diff=lfs merge=lfs -text

data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_content_list.json ADDED
The diff for this file is too large to render.

data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_model.json ADDED
The diff for this file is too large to render.

data/2025/2504_13xxx/2504.13367/15d0ba1c-4319-4c0c-857d-c4b920307365_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0551d50fd730ea4af103ccf232ea77e6fee1ebeb00c4f36b93da35a0bfc00745
+ size 2982042

data/2025/2504_13xxx/2504.13367/full.md ADDED
@@ -0,0 +1,496 @@
![](images/b7a12f3ebd5be5d280f2d282c420536095e032f04755419c808ae068d4b326c8.jpg)

# THOUGHTTERMINATOR: Benchmarking, Calibrating, and Mitigating Overthinking in Reasoning Models

Xiao Pu* Michael Saxon* Wenyue Hua William Yang Wang

University of California, Santa Barbara

Contact: xiao_pu@ucsb.edu, saxon@ucsb.edu

# Abstract

Reasoning models have demonstrated impressive performance on difficult tasks that traditional language models struggle with. However, many are plagued by the problem of overthinking: generating large amounts of unnecessary tokens that do not improve accuracy on a question. We introduce approximate measures of problem-level difficulty, demonstrate that a clear relationship between problem difficulty and optimal token spend exists, and evaluate how well calibrated a variety of reasoning models are in terms of efficiently allocating the optimal token count. We find that, in general, reasoning models are poorly calibrated, particularly on easy problems. To evaluate calibration on easy questions, we introduce DUMB500, a dataset of extremely easy math, reasoning, code, and task problems, and jointly evaluate reasoning models on these simple examples and on extremely difficult examples from existing frontier benchmarks in the same task domain. Finally, we introduce THOUGHTTERMINATOR, a training-free black-box decoding technique that significantly improves reasoning model calibration.

# 1 Introduction

Investment in improving the capabilities of language models has recently turned from data- and train-time scaling to inference scaling, i.e., training so-called reasoning models to expend more runtime compute generating chains of thought (Wei et al., 2022), debate (Liang et al., 2023), and self-corrections (Pan et al., 2024) in order to more robustly and correctly answer queries (Wu et al., 2024).

On average, there is a direct relationship between the amount of inference spend and performance on benchmarks of a variety of "reasoning tasks" (Jaech et al., 2024).

Under the inference-scaling paradigm, controlling costs is critical. Unfortunately, open reasoning models such as DeepSeek r1 (DeepSeek-AI et al., 2025) and QwQ (Qwen, 2025) have demonstrated a tendency to expend unnecessary inference tokens after the point at which the answer could already have been generated, a problem referred to as overthinking (Chen et al., 2024).

We need to precisely define overthinking in order to mitigate it. Chen et al. (2024) define overthinking as the number of times the model repeats the correct answer in its intermediate reasoning chain. From this definition, they used supervised fine-tuning and direct preference optimization to train reasoning models to prefer to select the shortest answer. Similar work applied knowledge distillation from non-reasoning models to blend their preference for answering concisely with the reasoning models' better performance (Yang et al., 2025). However, both of these methods require retraining, a process that may be costly or have unintended consequences on performance.

Training-free methods that seek to manage overthinking include selective invocation of chain-of-thought on tasks where it has known benefit (Sprague et al., 2024), early stopping of reasoning chains using probe-based confidence of final answer tokens (Fu et al., 2024), or simply eliciting reasoning-model-like behavior from non-reasoning models using continuation phrases like "wait...", which can be halted at any time (Muennighoff et al., 2025). Limitations of these methods include requiring external knowledge of task type, white-box access to the base model, or the use of non-reasoning models for precise control (Yu et al., 2025).

![](images/fc31f37b8bec8b05e32f03990a4b59591c248971304d9e6cc93b1bb1a73fe0c5.jpg)

![](images/5627bb5640e02f7267b21db83dc4183b1113146a8b00c31d9b9a477cafe2a540.jpg)

![](images/72e35b4c40e40737d9860dacffe653edfe5e70af9b416602ff380f5cb1ee5ca4.jpg)

![](images/ddca224efe50d1837f1143603cc77049dea6f0e73de6b9b754b90c3fe2772c4b.jpg)

Figure 1: Question-level difficulty vs. average token spend across models for three reasoning datasets. Difficulty scores are scaled by 10 and mapped to integers from 1 to 10 for readability. We observe a clear relationship between question difficulty and token spend distribution.

In this work we seek to analyze the difficulty calibration of token spend in reasoning models. Starting from the supposition that more difficult problems require more thought, we first characterize this difficulty-cost relationship in a variety of open reasoning models across three reasoning datasets, MATH500 (Lightman et al., 2023), GPQA (Rein et al., 2023), and ZebraLogic (Lin et al., 2024), allowing us to introduce a difficulty-calibrated measure of overthinking.

As these three existing datasets only allow us to assess overthinking in reasoning models on hard problems, we introduce DUMB500, a dataset of 'easy' queries, to explore overthinking on easy inputs.

With the overthinking problem formally defined, we introduce THOUGHTTERMINATOR, a training-free, black-box decoding strategy that mitigates overthinking using difficulty-calibrated conditioning. We show that THOUGHTTERMINATOR is a simple and effective way to control overthinking in reasoning models without requiring any access to gradients or training.

# 2 Difficulty Calibration in Reasoning Models

This work is concerned with how optimally reasoning models allocate token spend $Sp$, the total number of tokens generated in a given sample to respond to an input.

Given that increased inference scale leads to higher performance across a variety of reasoning tasks, it is reasonable to hypothesize that the difficulty of a question correlates with optimal token spend. We characterize the difficulty $D$ of a given question-answer pair for model $\mathcal{M}$ as the simple inaccuracy rate of that model over $n$ samples of the question $q$ against its gold answer $a$:

$$
D_{\mathcal{M}}(q, a) = p(\hat{a} \sim \mathcal{M}(q) \neq a) \approx \sum_{n} \mathbb{1}(\mathcal{M}(q) \neq a) / n \tag{1}
$$

We can compute a multi-model difficulty estimate $\bar{D}$ of $q$ as the expected difficulty $\mathbb{E}[D(q,a)]$ over a class of models $\mathbf{M}$. While this definition is model-dependent, it captures an operational notion of difficulty that is both reproducible and relevant for analyzing inference efficiency under current LLMs.

$$
\bar{D}(q) = \mathbb{E}[D(q, a)] \approx \sum_{m \in \mathbf{M}} \sum_{n} \mathbb{1}(\mathcal{M}_m(q) \neq a) / |\mathbf{M}| n \tag{2}
$$

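For concreteness, the following is a minimal sketch of how these estimates could be computed from sampled model answers (the `sample_answer` callables are hypothetical stand-ins for querying a model; none of these names come from the paper's code):

```python
from typing import Callable, List

def model_difficulty(sample_answer: Callable[[str], str],
                     question: str, gold: str, n: int = 10) -> float:
    """Eq. (1): per-model difficulty as the inaccuracy rate over n samples."""
    wrong = sum(sample_answer(question) != gold for _ in range(n))
    return wrong / n

def multi_model_difficulty(samplers: List[Callable[[str], str]],
                           question: str, gold: str, n: int = 10) -> float:
    """Eq. (2): difficulty averaged over a class of models M."""
    return sum(model_difficulty(s, question, gold, n) for s in samplers) / len(samplers)
```
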
| Model | Local overthinking $O_{\text{env}} \downarrow$ | Global overthinking $O_g \downarrow$ |
| --- | --- | --- |
| *Non-reasoning language models* | | |
| Qwen2-7B-Instruct | 291 | 219 |
| Llama-3.2-1B-Instruct | 542 | 354 |
| Llama-3.2-3B-Instruct | 708 | 473 |
| Llama-3.1-8B-Instruct | 1971 | 1755 |
| gemma-2-2b-it | 148 | 152 |
| gemma-2-9b-it | 131 | 161 |
| gemma-2-27b-it | 178 | 187 |
| deepseek-llm-7b-chat | 155 | 90 |
| *Reasoning language models* | | |
| QwQ-32B-Preview | 2923 | 3698 |
| QwQ-32B | 13662 | 11248 |
| DeepSeek-R1-Distill-Qwen-1.5B | 5730 | 4262 |
| DeepSeek-R1-Distill-Llama-8B | 4232 | 5755 |
| DeepSeek-R1-Distill-Qwen-7B | 3881 | 4001 |

Table 1: Local and global overthinking scores (rounded to integers).

Each answer $a_i$ incidentally sampled from $\mathcal{M}$ in response to question $q$ is associated with its own token spend $Sp_{\mathcal{M}}(a_i)$. Is there a relationship between the difficulty of each question and the token spend that naturally occurs?

We assess the difficulty $\bar{D}$ and token spend $Sp_{\mathcal{M}}$ using reasoning and non-reasoning models from the DeepSeek (DeepSeek-AI et al., 2025), Qwen (Yang et al., 2024; Qwen, 2025), Gemma (Mesnard et al., 2024), and LLaMa (Dubey et al., 2024) families for all questions in the MATH500 (Lightman et al., 2023), GPQA (Rein et al., 2023), and ZebraLogic (Lin et al., 2024) datasets.

Figure 1 contains scatter plots of $D_{\mathcal{M}}$ and $Sp(a)$ for each answer $a$ from DeepSeek-R1-7B for all three datasets. We observe that, similar to the dataset- and model-wise relationships between performance and token spend documented in prior work (Muennighoff et al., 2025), there also exists a clear relationship between question-level difficulty and average token spend.

Additionally, we note considerable variance in token spend between answer samples for each question. These reasoning models exhibit considerable inconsistency in their efficiency between samples. This leads to two natural questions:

1. How well-calibrated are reasoning models in consistently realizing their optimal token spend per question?
2. Is it possible to improve the calibration of reasoning models in their token spend?

# 2.1 Quantifying Overthinking

We formalize observational overthinking, i.e., the failure in consistency a reasoning model exhibits at realizing the minimum possible token spend per question.

The observed minimum spend of a question is the shortest reasoning chain among the full set of correct model-generated answers. We measure observational overthinking in terms of the difference between a model's typical token spend and this observed minimum. For questions sampled from dataset $\mathcal{D}$, the global overthinking score $O_g$ of a model is the mean difference between the length of each reasoning chain and the global observed minimum spend for each question.

$$
O_g(\mathcal{M}) = \sum_{q \in \mathcal{D}} \left( \mathbb{E}\left[ Sp(a \sim \mathcal{M} \mid q) \right] - \min_{\mathcal{M}_i \in \mathbf{M}} \left( Sp(a \sim \mathcal{M}_i \mid q) \right) \right) / |\mathcal{D}| \tag{3}
$$

The local envelope overthinking score $O_{\text{env}}$ is the mean difference between the maximum and minimum spends for each question for each model.

$$
O_{\text{env}}(\mathcal{M}) = \sum_{q \in \mathcal{D}} \left( \max\left[ Sp(a \sim \mathcal{M} \mid q) \right] - \min\left( Sp(a \sim \mathcal{M} \mid q) \right) \right) / |\mathcal{D}| \tag{4}
$$

![](images/90e3ee2c43c84fd9252311bc8fc314f8fe11b34af70ffe25a6af3e71daa9c87c.jpg)

Figure 2: DUMB500 dataset composition and grading method. The dataset contains four subsets, CHAT, CODE, TASK & MATH, each graded with subset-specific methods. MATH is graded with traditional answer pairs. CHAT and TASK are graded using a combination of LM-judged rubrics and, where appropriate, answers. CODE outputs are graded via test case coverage.

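As a worked illustration of Eqs. (3) and (4), here is a minimal sketch of computing the two scores from per-question lists of sampled token spends (the data-structure layout is an assumption, not the authors' code):

```python
from statistics import mean

def overthinking_scores(spends, observed_min):
    """spends: {question_id: [token spend of each answer sampled from model M]}
    observed_min: {question_id: shortest correct answer length observed across
                   all models in M, i.e., the cross-model minimum in Eq. (3)}."""
    o_g = mean(mean(s) - observed_min[q] for q, s in spends.items())  # Eq. (3)
    o_env = mean(max(s) - min(s) for s in spends.values())            # Eq. (4)
    return o_g, o_env
```
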
Table 1 presents the calibration scores for the full set of LLaMa, Qwen, Gemma, and DeepSeek models we evaluated on the three datasets. These calibration scores represent expected quantities of tokens wasted, as they are averages in excess of minimum spend values; lower is better. As expected, the reasoning models, with their propensity to overthink, have considerably higher overthinking scores than the non-reasoning models.

One weakness of our overthinking evaluation so far is that we have very few questions with low difficulty but a high overthinking tendency. This is because reasoning models are evaluated mainly on challenging frontier tasks. In the next section we introduce a resource to mitigate this.

# 3 Extending Overthinking Evaluation with DUMB500

While it is common knowledge that reasoning models tend to overthink on simple queries (Chen et al., 2024), no resource has been proposed to systematically evaluate this tendency on simple, straightforward questions.

To address this gap, we introduce DUMB500, a dataset specifically designed to evaluate models on simple questions that humans can answer effortlessly. The goal is not to challenge models with intricate logic but rather to assess their fundamental ability to recognize simplicity and provide concise, correct responses. To the best of our knowledge, DUMB500 is the first dataset explicitly focused on extremely simple (and sometimes deliberately naive) questions. DUMB500 consists of 500 manually curated questions spanning four domains:

- Mathematics (Math): Basic arithmetic, comparisons, geometric properties, and logical reasoning.
- Conversational Interaction (Chat): Casual dialogue, self-reflection, common knowledge, and basic object interactions.
- Programming & Computing (Code): Fundamental coding concepts, including variables, loops, conditionals, and data structures.
- Task Execution (Task): Simple natural language processing tasks such as paraphrasing, translation, and basic writing.

![](images/4a071855232fa6e57dbb04e6030e3559d5f9ce16318980b3f25950ab21835b1c.jpg)

Figure 3: Total difficulty distribution of the four datasets we evaluate in this work. Difficulty scores are scaled by 10 and mapped to integers from 1 to 10 for readability. By including DUMB500 in our analysis, we are able to characterize the overthinking behavior of current open reasoning models more consistently across the difficulty spectrum.

Each question is designed to be trivial for humans, requiring minimal cognitive effort, while still serving as a litmus test for language models. The dataset allows us to evaluate models along two key dimensions:

- Accuracy: Can the model correctly answer simple questions?
- Efficiency: Can the model provide concise answers without unnecessary elaboration?

To construct the dataset, we manually crafted the questions to ensure their simplicity and logical clarity. We also ensured diversity across categories, covering a range of common knowledge, arithmetic, and practical applications. The full list of question classes with their descriptions is given in subsection A.1. Figure 2 shows the distribution of question types in DUMB500 as well as sample questions and answers.

# 3.1 Evaluation techniques for DUMB500

In addition to the extremely simple MATH questions presented in DUMB500, which are evaluated using simple accuracy methods identical to MATH500, GPQA, and ZebraLogic, we also introduce CHAT, CODE, and TASK questions, which require more sophisticated evaluation. They are evaluated as follows:

CODE questions include a set of test cases for the program described in the prompt. A Python-based autograder checks that the requirements are met.

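As a sketch of what such a test-case autograder might look like (illustrative only; the paper does not specify its grader's interface), assuming each candidate program exposes a single entry-point function:

```python
def grade_code(candidate_src: str, test_cases, entry_point: str = "solution") -> float:
    """Run a candidate program against (args, expected) pairs; return the pass rate.
    Untrusted code should be sandboxed; exec is used here purely for illustration."""
    namespace = {}
    try:
        exec(candidate_src, namespace)
        fn = namespace[entry_point]
    except Exception:
        return 0.0
    passed = 0
    for args, expected in test_cases:
        try:
            if fn(*args) == expected:
                passed += 1
        except Exception:
            pass  # a crashing test case counts as a failure
    return passed / len(test_cases)
```
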
CHAT questions belong to one of seven subtasks (e.g., greetings, acknowledgement). All chat answers are evaluated according to a set of generic requirements, such as appropriateness and conciseness. Depending on the subtask, specific requirements such as precision and accuracy are checked. When accuracy assessment is required, a reference answer is also provided.

TASK questions generally include instructions for the assistant to produce some kind of writing or answer some work-related question. In addition to using the same generic requirements as CHAT, TASK questions have one or more question-specific requirements which check that the implicit instructions in the prompt are followed (see Figure 2). The CHAT and TASK requirements are checked using an LM (gpt-4o) as a judge.

![](images/a39233c56e1bc88f704df64f0f6df4fed29ff764eb0ee298d3320f9a485427ca.jpg)

![](images/a0e2646633e86a7c143335f4239d06298e073b34198d636c80a6bebb400d4c1e.jpg)

![](images/5a035e480b4f48c7dedaadcca1309d1e06a7bde2d6b26423a9e2ecd7eb4a2adb.jpg)

![](images/30724602ecef27f0168d78aa0666aa13581e228b0798a45453395a9c78407762.jpg)

![](images/5432888fe6ad055a4a2f36e42336210e300e160a20f797c9a35f05da657395b4.jpg)

Figure 4: Relationship between average token spend $Sp$ (tokens) and average score for the evaluated models on each subset of DUMB500.

# 3.2 From Dumb to Hard Questions

We evaluate the same set of models as in Table 1 on DUMB500 and analyze their accuracy and token spend across its different subsets. Figure 3 depicts the distribution of question-wise difficulty scores across the MATH subset of DUMB500, MATH500, GPQA, and ZebraLogic, assessed using those models. This confirms that DUMB500-MATH fills a gap in our analysis, adding a considerable quantity of easy questions with which to analyze overthinking.

Figure 4 shows the relationship between model-level accuracy and token spend for the tested models. As expected, on these simple math questions there is no positive relationship between token spend and accuracy, as the questions are extremely easy. For the other domains, we observe a negative correlation<sup>1</sup> between token spend and evaluation requirement pass rate (labeled accuracy).

# 4 THOUGHTTERMINATOR

Reasoning models often express inference scaling in natural language through tokens expressing uncertainty, like "wait..." or "let me check this..." (Muennighoff et al., 2025). Thus, overthinking often manifests as a tendency to overuse these extending expressions superfluously after the correct answer has already been found.

From this insight, we hypothesize that simple text-augmentation methods can be used to counteract this tendency, reminding the model of how long its output has been and how soon it should come to an answer. THOUGHTTERMINATOR realizes this as a series of interrupt messages, inserted into the autoregressive stream at a fixed token interval, that alert the model to how many tokens it has spent and how many remain.

Sometimes these timing messages and reminders alone are sufficient to get the model to provide its answer in a concise manner. If an answer isn't provided before the end of the time limit, a terminating prompt and constrained decoding force the model to output a final answer.

Figure 5 shows an example of a base reasoning model and one using THOUGHTTERMINATOR answering a question. THOUGHTTERMINATOR operates on a reasoning chain in three stages: scheduling, running, and terminating.

35 students attend a meeting. There is a multiple of 13 girls attending, and more girls than boys. How many boys are at the meeting?

# Vanilla reasoning model

Okay, so I have this problem here about a math club meeting with 35 students. The number of girls is a multiple of 13, and there are more girls

(many tokens later)

Also, given that G is a multiple of 13. That means G can be 13, 26, 39, etc., but since the total number of students is 35, the number of girls...

Therefore, the number of boys at the meeting is 9. Just to double-check, let me add them up: 26 girls + 9 boys = 35 students total. Yep, that adds up. Also, 26 is indeed a multiple of 13, and 26 is greater than 9, so all the given conditions are satisfied. But wait, I think...

Correct in 1358 tokens.

![](images/581acd48568aa80c096caa667172e0fe4c3edfd17e5ec1c57c1612fbadf7a882.jpg)

# Thought Terminator

I think I only need around 600 tokens to solve.

![](images/352e4f94f22f607ff8f695359842e116f03471cb231b1b06f92cb664ff578a85.jpg)

600 tokens remaining. I'll be back.

![](images/df96533c806e323da66fec49e0b56d5b2dace76596c95829ff5e4090b9f55a9c.jpg)

Alright, let's try to figure out this problem. There are 35 students in total in the math club meeting. We know that the number of girls is a multiple of 13, and there are more girls than boys...

![](images/d7f678edad861cb21d30f22416b08ac0469573f0fc0e10be631d47bf4e7d63a4.jpg)

(many tokens later)

100 tokens remaining. I'll be back.

But wait, we also have the condition that there are more girls than boys, which translates to $G > B$. Since $G + B = 35$, I can rewrite this as $G > 35 - G$...

![](images/13527b3c58070716414b69e0269d27b00d185a55720e3870c1e52b0bb124ad8e.jpg)

Then, the number of boys would be $35 - G = 35 - 26 = 9$. Let me check if this makes sense...

![](images/538aa8e65541ed0336c847201d371c2296709a4338ae65ae25bf71ae082642dc.jpg)

**Final Answer** The number of boys at the meeting is 9. In summary, we have determined...

![](images/87364e66d83fbfd29ef9111af5d78b99b22d3af1bd107989f236b69456627d79.jpg)

...girls than boys, we need to determine the number of boys. Thus, the number of boys at the meeting is \(\boxed{9}\).

![](images/c71bfc7f709c37fe7ea407702dfdf8bf2978a601cc473853b26a0955de2d0629.jpg)

Time is up. Answer NOW

There are 9 boys at the meeting.

Correct in 645 tokens.

![](images/df13ded6d9154f47a91496e336a31a618bb28449dbf7c3458660c781ce2b3cbb.jpg)

Figure 5: THOUGHTTERMINATOR uses a reasoning model's (calibrated) estimate of the difficulty of a problem to set its intervention, periodically interrupting the reasoning model's output to remind it of the number of remaining tokens. Once the token allotment has been used, it forces the model to provide an answer with constrained decoding.

Scheduling. Given an input question, THOUGHTTERMINATOR needs an estimate of how many tokens are necessary to produce a correct answer in order to set its interrupt rate and termination time.

Under our difficulty-calibrated token budget hypothesis, we assume that the number of required tokens can be estimated based on the difficulty of the question. In deployment, THOUGHTTERMINATOR is used in the tool-use paradigm, where a running model makes its own estimate of the difficulty of an input question and then invokes it.

We experiment with both a trained difficulty estimator and a zero-shot one (gpt-4o) to produce token spend estimates for each problem in order to characterize performance in this setting. To train a difficulty estimator, we divide the training set questions into 10 balanced bins based on their difficulty scores. We then finetune a Llama-3-8B-Instruct model to predict the difficulty level of a given question. To convert the predicted difficulty level into an appropriate number of answer tokens, we compute the average length of minimal successful answers for each difficulty level in the training set.

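A minimal sketch of this level-to-budget conversion (helper names are illustrative; `predict_level` stands in for the finetuned Llama-3-8B-Instruct estimator described above):

```python
from collections import defaultdict

def build_budget_table(train_examples):
    """train_examples: iterable of (difficulty_level in 1..10, token length of
    the minimal successful answer for that training question)."""
    lengths = defaultdict(list)
    for level, min_len in train_examples:
        lengths[level].append(min_len)
    # Deadline for a level = average minimal successful answer length at that level.
    return {level: sum(v) / len(v) for level, v in lengths.items()}

def schedule_deadline(question, predict_level, budget_table) -> int:
    """predict_level: trained difficulty estimator mapping a question to a level."""
    return int(budget_table[predict_level(question)])
```
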
Running. Once the deadline has been set in scheduling, the base reasoning model's generation process runs. Every $n = \min(250, \text{deadline}/2)$ steps, an interrupt message<sup>2</sup> is inserted into the token stream, notifying the model of how many tokens have been used and how many remain.

![](images/1fdea51374a5af38e976d087dd52dada79a30f34c4e95db72a81d13f280ac896.jpg)

![](images/6700178cb0c9508c948988b611b46ce8b0c8317c142f6d52afd4180f3a6d8158.jpg)

![](images/e1ba5e633e285ccab04d1c816d0cb6775a0ca8921c5a15bbdc4c91e69ae2d042.jpg)

![](images/759e54dcb199c1868fd8ab257be18fc25efe607549f01a548c1499fa91e96d72.jpg)

![](images/682c4a69257fd7dd81c194606163169380868bc470ed364ffdf64c1588251b44.jpg)

Figure 6: Comparison of the relationship between Pass@10 and token spend for the evaluated reasoning models in the "Base" setting and with THOUGHTTERMINATOR.

| Model | Local $O_{\text{env}} \downarrow$ (Base) | Global $O_g \downarrow$ (Base) | Accuracy ↑ (Base) | Local $O_{\text{env}} \downarrow$ (TT) | Global $O_g \downarrow$ (TT) | Accuracy ↑ (TT) |
| --- | --- | --- | --- | --- | --- | --- |
| QwQ-32B-Preview | 2923 | 3698 | 0.80 | 518 (-82%) | 693 (-81%) | 0.79 (-1%) |
| QwQ-32B | 13662 | 11248 | 0.94 | 215 (-98%) | 1021 (-91%) | 0.80 (-15%) |
| R1-1.5B | 5730 | 4262 | 0.50 | 696 (-88%) | 882 (-79%) | 0.80 (+59%) |
| R1-7B | 3881 | 4001 | 0.73 | 678 (-83%) | 948 (-76%) | 0.81 (+11%) |
| R1-8B | 4232 | 5755 | 0.92 | 725 (-83%) | 1148 (-80%) | 0.80 (-13%) |

Table 2: Local envelope overthinking ($O_{\text{env}}$) and global overthinking ($O_g$) scores, along with accuracy, for reasoning models under the Base setting and with THOUGHTTERMINATOR (TT). Relative changes from Base to THOUGHTTERMINATOR are shown in parentheses.

At each interrupt, THOUGHTTERMINATOR performs a regex check for the expected final answer format (which is specified in the prompt). If an answer is detected, the reasoning chain is immediately terminated and the answer is returned.

Terminating. If a final answer hasn't been produced by the deadline, a termination message is shown to the model, and a final output is immediately generated with constrained decoding using the same answer-finding regex.

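Putting the three stages together, a minimal sketch of the decoding loop (the `generate_tokens` and `force_answer` methods are hypothetical stand-ins for ordinary and constrained decoding calls, and the interrupt wording is illustrative):

```python
import re

ANSWER_RE = re.compile(r"\\boxed\{(.+?)\}")  # expected answer format from the prompt

def thought_terminator(model, prompt: str, deadline: int) -> str:
    interval = min(250, deadline // 2)  # interrupt rate from the Running stage
    text, spent = "", 0
    while spent < deadline:
        # Run the base reasoning model for `interval` more tokens.
        text += model.generate_tokens(prompt + text, max_new_tokens=interval)
        spent += interval
        found = ANSWER_RE.search(text)
        if found:  # answer already produced: terminate the chain early
            return found.group(1)
        # Interrupt message inserted into the autoregressive stream.
        text += f"\n[{spent} tokens used, {deadline - spent} tokens remaining.]\n"
    # Deadline reached: termination message, then constrained decoding of an answer.
    text += "\nTime is up. Answer NOW: "
    return model.force_answer(prompt + text, pattern=ANSWER_RE)
```
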
# 5 Results

Figure 6 shows the performance and token spend of five DeepSeek and QwQ reasoning models in the base setting (triangle marker) and with THOUGHTTERMINATOR (star marker). Table 2 shows the change in overthinking scores that the reasoning models exhibit from the base setting to THOUGHTTERMINATOR.

4/5 models on MATH500, 2/3 models on GPQA, and all models on ZebraLogic and DUMB500-MATH see a significant decrease in overthinking at effectively equivalent (or better) Pass@10 performance under THOUGHTTERMINATOR compared to standard decoding. Globally, overthinking scores drop dramatically and accuracy increases when THOUGHTTERMINATOR is used. Considering that the token spend budgets are defined directly by LMs, THOUGHTTERMINATOR is a simple and effective tool to dramatically improve token efficiency in reasoning models.

![](images/89899d90f80f980839663034641a61668a0142bcfe1e2542c837d3074ea4dd84.jpg)

![](images/92e86fd9ceb7f94163041bf0cb974852de5485b8ade891ce3d1a7a653157e62a.jpg)

![](images/8e21f1e860839d4ea22a7075d2360b946e6269ff7810d2faf192f17d790a918f.jpg)

![](images/dee4ed60354ef72a6680045f881545b825ea445cb6273f9f580a4f803fb4fd33.jpg)

![](images/f3af9c4616ec29961937b3d6f7fac9ec81c7c1da8057695da4898fe6d0ee3661.jpg)

Figure 7: Calibration ablation experiment using DeepSeek-R1-1.5B. real-min represents using the previously observed minimum successful answer length (or a fallback maximum for examples that were never solved correctly) as the THOUGHTTERMINATOR deadline. fix-{200,500,1000,2000} signify using the respective number as a fixed token count deadline for all samples. pred-diff-{gpt4o, ref, trained} refer to using question-level difficulty predictions as deadlines, produced from external LMs, a question-level reference difficulty key of token lengths from the other models, or trained RMs, respectively.

# 5.1 Calibration of THOUGHTTERMINATOR

To evaluate how well-calibrated THOUGHTTERMINATOR is (i.e., whether its token budget selections are optimal), we compare our difficulty-prediction-based deadline estimator against a set of baselines. In addition to our trained difficulty predictor and the zero-shot gpt-4o predictor, we use the previously observed optimal token spends from the base models (section 2) and fixed deadlines of 500, 1000, and 2000 tokens with DeepSeek-R1-Qwen-1.5B to assess how performant our predicted deadlines are in the THOUGHTTERMINATOR framework.

Figure 7 shows the performance of the model under those deadline prediction strategies.

Our method, pred-diff-trained, achieves the best Pass@10 among the compared methods on MATH500 and DUMB500, and is within $0.02\%$ of the best Pass@10 on ZebraLogic and GPQA, at significant savings in compute cost. Note how all four datasets exhibit a positive correlation between average token spend and Pass@10 that eventually reaches a steady maximum. Under our definition, overthinking mitigation can be thought of as identifying the lowest token spend that recovers high-spend performance. Figure 7 confirms that THOUGHTTERMINATOR achieves this.

# 5.2 Utility of interrupt messages in THOUGHTTERMINATOR

Appendix Table 3 shows the difference in performance of r1-1.5B in an unmodified base condition, under a naive baseline, and under THOUGHTTERMINATOR with question-level randomly assigned deadlines and with the core trained-predicted deadlines. In the naive baseline, the reasoning model is immediately interrupted at the deadline and, without warning, forced to generate an answer using the same constrained decoding technique.

r1-1.5B-THOUGHTTERMINATOR presents roughly equivalent Pass@10 performance to the naive baseline on the non-arithmetic GPQA and ZebraLogic datasets, and wins by $6\%$ on MATH500 and $18\%$ on DUMB500-math. This suggests that the intermediate interrupt messages produced by THOUGHTTERMINATOR do play a role in minimizing the performance loss of decoding-based overthinking mitigation.

# 6 Related Work

Mitigating overthinking. To shorten LLM reasoning chains, Deng et al. (2024) and Liu et al. (2024) propose to internalize intermediate steps by iteratively training the models, though this introduces additional training overhead. Dynasor is a technique for terminating chains of thought using the LM's confidence in a probe containing the string "wait, I just realized I know the answer..." with constrained decoding (Fu et al., 2024). While our termination process can use a similar constrained decoding technique, THOUGHTTERMINATOR is not reliant on a white-box probe and is much simpler to run. Chen et al. (2024) introduce metrics for overthinking and process efficiency, similar to ours, but they focus on important heuristics such as "number of repetitions of the correct answer" or "ratio of correct to incorrect answer proposals", while our analysis quantifies overthinking solely based on the observed distribution of reasoning chain lengths.

Benchmarking reasoning models. A number of benchmarks have been proposed to evaluate the reasoning ability of large language models (LLMs), with a focus on challenging, multi-step problem-solving (Cobbe et al., 2021; Srivastava et al., 2022; Hendrycks et al., 2021; Zhu et al., 2023; Lin et al., 2024). Several recent works on efficiency benchmarking of LMs have been proposed, including Mercury, an efficiency evaluation for code synthesis tasks (Du et al., 2024). GSM8K-Zero is another dataset for evaluating efficiency of reasoning, containing easy questions derived from GSM8K (Chiang & Lee, 2024).

# 7 Conclusions

In this work we analyzed the problem of overthinking in reasoning models through an observational lens. Motivated by our observational measures of overthinking, we demonstrated a clear sample-wise relationship between token spend and question-level difficulty. We introduced the DUMB500 dataset to allow us to evaluate the robustness of any overthinking mitigation on simple questions, and we proposed THOUGHTTERMINATOR, a simple inference-time technique for ensuring efficient token spend, calibrated by the aforementioned difficulty-optimal spend relationship.

285
+ # References
286
+
287
+ Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, Rui Wang, Zhaopeng Tu, Haitao Mi, and Dong Yu. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. ArXiv, abs/2412.21187, 2024. URL https://api_semanticscholar.org/CorpusID:275133600.
288
+ Cheng-Han Chiang and Hung-yi Lee. Over-reasoning and redundant calculation of large language models. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 161-169, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-short.15/.
289
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
290
+ DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Jun-Mei Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiaoling Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Lu, Ziyi Gao, Aixin Liu, Bing Xue, Bing-Li Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dong-Li Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Jiong
291
+
292
+ Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, M. Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shao-Kang Wu, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wen-Xia Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyu Jin, Xi-Cheng Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yi Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yu-Jing Zou, Yujia He, Yunfan Xiong, Yu-Wei Luo, Yu mei You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanping Huang, Yao Li, Yi Zheng, Yuchen Zhu, Yunxiang Ma, Ying Tang, Yukun Zha, Yuting Yan, Zehui Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhen guo Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, Zi-An Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. ArXiv, abs/2501.12948. 2025. URL https://api(semanticscholar.org/CorpusID:275789950.
293
+
294
+ Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024.
295
+
296
+ Mingzhe Du, Anh Tuan Luu, Bin Ji, Qian Liu, and See-Kiong Ng. Mercury: A code efficiency benchmark for code large language models. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024.
297
+
298
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, Anirudh Goyal, Anthony S. Hartshorn, Aobo Yang, Archi Mitra, Archie Sravankumar, Artem Korenev, Arthur Hinsvark, Arun Rao, Aston Zhang, Aurélien Rodriguez, Austen Gregerson, Ava Spataru, Bap tiste Roziere, Bethany Biron, Binh Tang, Bobbie Chern, Charlotte Caucheteux, Chaya Nayak, Chloe Bi, Chris Marra, Chris McConnell, Christian Keller, Christophe Touret, Chunyang Wu, Corinne Wong, Cristian Cantón Ferrer, Cyrus Nikolaidis, Damien Alonsius, Daniel Song, Danielle Pintz, Danny Livshits, David Esiobu, Dhruv Choudhary, Dhruv Mahajan, Diego Garcia-Olano, Diego Perino, Dieuwke Hupkes, Egor Lakomkin, Ehab A. AlBadawy, Elina Lobanova, Emily Dinan, Eric Michael Smith, Filip Radenovic, Frank Zhang, Gabriele Synnaeve, Gabrielle Lee, Georgia Lewis Anderson, Graeme Nail, Grégoire Mialon, Guanglong Pang, Guillem Cucurell, Hailey Nguyen, Hannah Korevaar, Hu Xu, Hugo Touvron, Iliyan Zarov, Imanol Arrieta Ibarra, Isabel M. Kloumann, Ishan Misra, Ivan Evtimov, Jade Copet, Jaewon Lee, Jan Geffert, Jana Vranes, Jason Park, Jay Mahadeokar, Jeet Shah, Jelmer van der Linde, Jennifer Billock, Jenny Hong, Jenya Lee, Jeremy Fu, Jianfeng Chi, Jianyu Huang, Jiawen Liu, Jie Wang, Jiecao Yu, Joanna Bitton, Joe Spisak, Jongsoo Park, Joseph Rocca, Joshua Johnstun, Joshua Saxe, Ju-Qing Jia, Kalyan Vasuden Alwala, K. Upasani, Kate Plawiak, Keqian Li, Ken-591 neth Heafield, Kevin Stone, Khalid El-Arini, Krithika Iyer, Kshitiz Malik, Kuen ley Chiu, Kunal Bhalla, Lauren Rantala-Yeary, Laurens van der Maaten, Lawrence Chen, Liang Tan, Liz Jenkins, Louis Martin Lovish Madaan Lubo Malo Lukas Blecher Lukas Landzaat Luke de Oliveira Madeline Muzzi Mahesh Pasupuleti Mannat Singh Manohar Paluri Marcin Kardas Mathew Oldham Mathieu Rita Maya Pavlova Melissa Hall Melanie Kambadur Mike Lewis Min Si Mitesh Kumar Singh Mona Hassan Naman Goyal Narjes Torabi Nikolay Bashlykov Nikolay Bogoychev Niladri S. Chatterji Olivier Duchenne Onur cCelebi Patrick Alrassy Pengchuan Zhang Pengwei Li Petar Vasic Peter Weng Prajwal Bhargava Pratik Dubal Praveen Krishnan,Punit Singh Koura Puxin Xu Qing He Qingxiao Dong Ragavan Srinivasan Raj Ganapathy Ramon Calderer Ricardo Silveira Cabral Robert Stojnic Roberta Raileanu Rohit Girdhar Rohit Patel Ro main Sauvestre
299
+
300
+ Ronnie Polidoro, Roshan Sumbaly, Ross Taylor, Ruan Silva, Rui Hou, Rui Wang, Saghar Hosseini, Sahana Chennabasappa, Sanjay Singh, Sean Bell, Seohyun Sonia Kim, Sergey Edunov, Shaoliang Nie, Sharan Narang, Sharath Chandra Raparthy, Sheng Shen, Shengye Wan, Shruti Bhosale, Shun Zhang, Simon Vandenhende, Soumya Batra, Spencer Whitman, Sten Sootla, Stephane Collot, Suchin Gururangan, Sydney Borodinsky, Tamar Herman, Tara Fowler, Tarek Sheasha, Thomas Georgiou, Thomas Scialom, Tobias Speckbacher, Todor Mihaylov, Tong Xiao, Ujjwal Karn, Vedanuj Goswami, Vibhor Gupta, Vignesh Ramanathan, Viktor Kerkez, Vincent Gonguet, Virginie Do, Vish Vogeti, Vladan Petrovic, Weiwei Chu, Wenhan Xiong, Wenyin Fu, Whit ney Meers, Xavier Martinet, Xiaodong Wang, Xiaoqing Ellen Tan, Xinfeng Xie, Xuchao Jia, Xuewei Wang, Yaelle Goldschlag, Yashesh Gaur, Yasmine Babaei, Yiqian Wen, Yiwen Song, Yuchen Zhang, Yue Li, Yuning Mao, Zacharie Delpierre Coudert, Zhengxu Yan, Zhengxing Chen, Zoe Papakipos, Aaditya K. Singh, Aaron Grattafori, Abha Jain, Adam Kelsey, Adam Shajnfeld, Adi Gangidi, Adolfo Victoria, Ahuva Goldstand, Ajay Menon, Ajay Sharma, Alex Boesenberg, Alex Vaughan, Alexei Baevski, Allie Feinstein, Amanda Kallet, Amit Sangani, Anam Yunus Andrei Lupu, Andres Alvarado, Andrew Caples, Andrew Gu, Andrew Ho, Andrew Poulton, Andrew Ryan, Ankit Ramchandani, Annie Franco, Aparajita Saraf, Arkabandhu Chowdhury, Ashley Gabriel, Ashwin Bharambe, Assaf Eisenman, Azadeh Yazdan, Beau James, Ben Maurer, Ben Leonhardi, Po-Yao (Bernie) Huang, Beth Loyd, Beto De Paola Bhargavi Paranjape, Bing Liu, Bo Wu Boyu Ni Braden Hancock Bram Wasti Brandon Spence Brani Stojkovic Brian Gamido Britt Montalvo Carl Parker Carly Burton Catalina Mejia Changhan Wang Changkyu Kim Chao Zhou Chester Hu Ching-Hsiang Chu Chris Cai Chris Tindal Christoph Feichtenhofer Damon Civin Dana Beaty Daniel Keymer Shang-Wen Li Danny Wyatt David Adkins David Xu Davide Testuggine Delia David Devi Parikh Diana Liskovich Didem Foss Dingkang Wang Duc Le,Dustin Holland Edward Dowling Eissa Jamil Elaine Montgomery Eleonora Presani Emily Hahn Emily Wood Erik Brinkman Esteban Arcaute Evan Dunbar Evan Smothers Fei Sun Felix Kreuk Feng Tian First Ozgenel Francesco Caggioni Francisco Guzm'an Frank J. Kanayet Frank Seide Gabriela Medina Florez Gabriella Schwarz Gada Badeer Georgia Swee Gil Halpern Govind Thattai Grant Herman Grigory G. 
Sizov Guangyi ZhangGuna Lakshminarayananan Hamid Shojanazeri Han Zou Hannah Wang Han Zha Haroun Habeeb Harrison Rudolph Helen Suk Henry Aspegren Hunter Goldman Igor Molybog Igor Tufanov Irina-Elena Veliche Itai Gat Jake Weissman James Geboski James Kohli Japhet Asher Jean-Baptiste Gaya Jeff Marcus Jeff Tang Jennifer Chan Jenny Zhen Jeremy Reizenstein Jeremy Teboul Jessica Zhong Jian Jin Jingyi Yang Joe Cummings Jon Carvill Jon Shepard Jonathan McPhie Jonathan Torres Josh Ginsburg Junjie Wang Kaixing(Kai) Wu U KamHou Karan Saxena Karthik Prasad Kartikay Khandelwal Katayoun Zand Kathy Matosich Kaushik Veeraraghavan Kelly Michelena Keqian Li Kun HuangKunal Chawla Kushal Lakhotia Kyle HuangLailin Chen Laksha Garg A Lavender Leandro Silva Lee Bell Lei Zhang Liangpeng Guo Licheng Yu Liron Moshkovich Luca Wehrstedt Madian Khabsa Manav Avalani Manish Bhatt Maria Tsimpoukelli Martynas Mankus Matan Hasson Matthew Lennie Matthias Reso Maxim Groshev Maxim Naumov Maya Lathi Meghan Keneally Michael L.Seltzer Michal Valko Michelle Restrepo Mihir Patel Mik Vyatskov Mikayel Samvelyan Mike Clark Mike Macey Mike Wang Miquel Jubert Hermoso Mo Metanat Mohammad Raste gari Munish Bansal Nandhini Santhanam Natascha Parks Natasha White Navyata Bawa Nayan Singhal Nick Egebo Nicolas Usunier Nikolay Pavlovich Laptev Ning Dong Ning Zhang Norman Cheng Oleg Chernoguz Olivia Hart Omkar Salpekar Ozlem Kalinli Parkin Kent Parth Parekh Paul Saab Pavan Balaji Pedro Rittner Philip Bontrager Pierre Roux Piotr Dollar Polina Zvyagina Prashant Ratanchandani Prishit Yuvraj Qian Liang Rachad Alao Rachel Rodriguez Rafi Ayub Ragtootham Murthy Raghu Nayani Rahul Mitra Raymond Li Rebekkah Hogan Robin Battey Rocky Wang Rohan Maheswari Russ Howes Rudy Rinott Sai Jayesh Bondu Samyak Datta Sara Chugh Sara Hunt Sargun Dhillon Sasha Sidorov Satadru Pan Saurabh Verma Seiji Yamamoto Sharadh Ramaswamy Shaun Lindsay Sheng Feng Shenghao Lin Shengxin Cindy Zha Shiva Shankar Shuqiang Zhang Sinong WangSneha Agarwal Soji Sajuyigbe Soumith Chintala Stephanie Max Stephen Chen Steve Kehoe Steve Satterfield Sudarshan Govindaprasad Sumit Gupta Sung-Bae Cho Sunny Virk Suraj Subramanian Sy Choudhury Sydney Goldman Tal Remez Tamar Glaser Tamara Best Thilo Kohler Thomas Robinson
301
+
302
+ Tianhe Li, Tianjun Zhang, Tim Matthews, Timothy Chou, Tzook Shaked, Varun Vontimitta, Victoria Ajayi, Victoria Montanez, Vijai Mohan, Vinay Satish Kumar, Vishal Mangla, Vlad Ionescu, Vlad Andrei Poenaru, Vlad T. Mihailescu, Vladimir Ivanov, Wei Li, Wenchen Wang, Wenwen Jiang, Wes Bouaziz, Will Constable, Xia Tang, Xiaofang Wang, Xiaojian Wu, Xiaolan Wang, Xide Xia, Xilun Wu, Xinbo Gao, Yanjun Chen, Ye Hu, Ye Jia, Ye Qi, Yenda Li, Yilin Zhang, Ying Zhang, Yossi Adi, Youngjin Nam, Yu Wang, Yuchen Hao, Yundi Qian, Yuzi He, Zach Rait, Zachary DeVito, Zef Rosnbrick, Zhaoduo Wen, Zhenyu Yang, and Zhiwei Zhao. The llama 3 herd of models. ArXiv, abs/2407.21783, 2024. URL https://api_semanticscholar.org/CorpusID:271571434.
303
+ Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993, 2024.
304
+ Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. NeurIPS, 2021.
305
+ Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.
306
+ Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Zhaopeng Tu, and Shuming Shi. Encouraging divergent thinking in large language models through multi-agent debate. ArXiv, abs/2305.19118, 2023. URL https://apisemantic scholar.org/CorpusID:258967540.
307
+ Hunter Lightman, Vineet Kosaraju, Yura Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. ArXiv, abs/2305.20050, 2023. URL https://api-semanticscholar.org/CorpusID:258987659.
308
+ Bill Yuchen Lin, Ronan Le Bras, Peter Clark, and Yejin Choi. Zebralogic: Benchmarking the logical reasoning ability of language models, 2024. URL https://huggingface.co/spaces/allenai/ZebraLogic.
309
+ Tengxiao Liu, Qipeng Guo, Xiangkun Hu, Cheng Jiayang, Yue Zhang, Xipeng Qiu, and Zheng Zhang. Can language models learn to skip steps? arXiv preprint arXiv:2411.01855, 2024.
310
+ Gemma Team Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, L. Sifre, Morgane Riviere, Mihir Kale, J Christopher Love, Pouya Dehghani Tafti, L'leonard Hussenot, Aakanksha Chowdhery, Adam Roberts, Aditya Barua, Alex Botev, Alex Castro-Ros, Ambrose Slone, Am'elie H'eliou, Andrea Tacchetti, Anna Bulanova, Antonia Paterson, Beth Tsai, Bobak Shahriari, Charline Le Lan, Christopher A. Choquette-Choo, Cl'ment Crepy, Daniel Cer, Daphne Ippolito, David Reid, Elena Buchatskaya, Eric Ni, Eric Noland, Geng Yan, George Tucker, George-Christian Muraru, Grigory Rozhdestvenskiy, Henryk Michalewski, Ian Tenney, Ivan Grishchenko, Jacob Austin, James Keeling, Jane Labanowski, Jean-Baptiste Lespiau, Jeff Stanway, Jenny Brennan, Jeremy Chen, Johan Ferret, Justin Chiu, Justin Mao-Jones, Kather ine Lee, Kathy Yu, Katie Millican, Lars Lowe Sjoesund, Lisa Lee, Lucas Dixon, Michael Reid, Maciej Mikula, Mateo Wirth, Michael Sharman, Nikolai Chinaev, Nithum Thain, Olivier Bachem, Oscar Chang, Oscar Wahltinez, Paige Bailey, Paul Michel, Petko Yotov, Pier Giuseppe Sessa, Rahma Chaabouni, Ramona Comanescu, Reena Jana, Rohan Anil, Ross McIlroy, Ruibo Liu, Ryan Mullins, Samuel L. Smith, Sebastian Borgeaud, Sertan Girgin, Sholto Douglas, Shree Pandya, Siamak Shakeri, Soham De, Ted Klimenko, Tom Hennigan, Vladimir Feinberg Wojciech Stokowiec, Yu hui Chen, Zafarali Ahmed, Zhitao Gong, Tris Warkentin, Ludovic Peran, Minh Giang, Clément Farabet, Oriol Vinyals, Jeffrey Dean, Koray Kavukcuoglu Demis Hassabis, Zoubin Ghahramani, Douglas Eck, Joelle Barral, Fernando Pereira Eli Collins, Armand Joulin, Noah Fiedel, Evan Senter, Alek Andreev, and Kathleen Kenealy. Gemma: Open models based on gemini research and technology. ArXiv, abs/2403.08295 2024. URL https://api_semanticscholar.org/CorpusID:268379206.
311
+
312
+ Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Fei-Fei Li, Hanna Hajishirzi, Luke S. Zettlemoyer, Percy Liang, Emmanuel J. Candes, and Tatsunori Hashimoto. s1: Simple test-time scaling. ArXiv, abs/2501.19393, 2025. URL https://api-semanticscholar.org/CorpusID:276079693.
313
+ Liangming Pan, Michael Stephen Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse automated correction strategies. Transactions of the Association for Computational Linguistics, 12:484-506, 2024. URL https://api.semanticscholar.org/CorpusID:269636518.
314
+ Qwen. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/.
315
+ David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. Gpqa: A graduate-level google-proof q&a benchmark. ArXiv, abs/2311.12022, 2023. URL https://api.semanticscholar.org/CorpusID:265295009.
316
+ Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. ArXiv, abs/2409.12183, 2024. URL https://api.semanticscholar.org/CorpusID:272708032.
317
+ Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R. Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, Agnieszka Kluska, Aitor Lewkowycz, Akshit Agarwal, Alethea Power, Alex Ray, Alex Warstadt, Alexander W. Kocurek, Ali Safaya, Ali Tazarv, Alice Xiang, Alicia Parrish, Allen Nie, Aman Hussain, Amanda Askell, Amanda Dsouza, Ambrose Slone, Ameet Rahane, Anantharaman S. Iyer, Anders Andreassen, Andrea Madotto, Andrea Santilli, Andreas Stuhlmüller, Andrew M. Dai, Andrew La, Andrew Kyle Lampinen, Andy Zou, Angela Jiang, Angelica Chen, Anh Vuong, Animesh Gupta, Anna Gottardi, Antonio Norelli, Anu Venkatesh, Arash Gholamidavoodi, Arfa Tabassum, Arul Menezes, Arun Kirubarajan, Asher Mullokandov, Ashish Sabharwal, Austin Herrick, Avia Efrat, Aykut Erdem, Ayla Karakaş, B. Ryan Roberts, Bao Sheng Loe, Barret Zoph, Bartłomiej Bojanowski, Batuhan Ozyurt, Behnam Hedayatnia, Behnam Neyshabur, Benjamin Inden, Benno Stein, Berk Ekmekci, Bill Yuchen Lin, Blake Stephen Howald, Bryan Orinion, Cameron Diao, Cameron Dour, Catherine Stinson, Cedrick Argueta, César Ferri Ramírez, Chandan Singh, Charles Rathkopf, Chenlin Meng, Chitta Baral, Chiyu Wu, Chris Callison-Burch, Chris Waites, Christian Voigt, Christopher D. Manning, Christopher Potts, Cindy Ramirez, Clara E. Rivera, Clemencia Siro, Colin Raffel, Courtney Ashcraft, Cristina Garbacea, Damien Sileo, Daniel H. Garrette, Dan Hendrycks, Dan Kilman, Dan Roth, Daniel Freeman, Daniel Khashabi, Daniel Levy, Daniel Moseguí González, Danielle R. Perszyk, Danny Hernandez, Danqi Chen, Daphne Ippolito, Dar Gilboa, David Dohan, David Drakard, David Jurgens, Debajyoti Datta, Deep Ganguli, Denis Emelin, Denis Kleyko, Deniz Yuret, Derek Chen, Derek Tam, Dieuwke Hupkes, Diganta Misra, Dilyar Buzan, Dimitri Coelho Mollo, Diyi Yang, Dong-Ho Lee, Dylan Schrader, Ekaterina Shutova, Ekin Dogus Cubuk, Elad Segal, Eleanor Hagerman, Elizabeth Barnes, Elizabeth Donoway, Ellie Pavlick, Emanuele Rodola, Emma Lam, Eric Chu, Eric Tang, Erkut Erdem, Ernie Chang, Ethan A. Chi, Ethan Dyer, Ethan J. Jerzak, Ethan Kim, Eunice Engefu Manyasi, Evgenii Zheltonozhskii, Fanyue Xia, Fatemeh Siar, Fernando Martínez-Plumed, Francesca Happé, Francois Chollet, Frieda Rong, Gaurav Mishra, Genta Indra Winata, Gerard de Melo, German Kruszewski, Giambattista Parascandolo, Giorgio Mariani, Gloria Xinyue Wang, Gonzalo Jaimovitch-López, Gregor Betz, Guy Gur-Ari, Hana Galijasevic, Hannah Kim, Hannah Rashkin, Hannaneh Hajishirzi, Harsh Mehta, Hayden Bogar, Henry Shevlin, Hinrich Schütze, Hiromu Yakura, Hongming Zhang, Hugh Mee Wong, Ian Ng, Isaac Noble, Jaap Jumelet, Jack Geissinger, Jackson Kernion, Jacob Hilton, Jaehoon Lee, Jaime Fernández Fisac, James B. Simon, James Koppel, James Zheng, James Zou, Jan Kocoń, Jana Thompson, Janelle Wingfield, Jared Kaplan, Jarema Radom, Jascha Narain Sohl-Dickstein, Jason Phang, Jason Wei, Jason Yosinski, Jekaterina Novikova, Jelle Bosscher, Jennifer Marsh, Jeremy Kim, Jeroen
318
+
319
+ Taal, Jesse Engel, Jesujoba Oluwadara Alabi, Jiacheng Xu, Jiaming Song, Jillian Tang, Jane W. Waweru, John Burden, John Miller, John U. Balis, Jonathan Batchelder, Jonathan Berant, Jörg Frohberg, Jos Rozen, Jose Hernandez-Orallo, Joseph Boudeman, Joseph Guerr, Joseph Jones, Joshua B. Tenenbaum, Joshua S. Rule, Joyce Chua, Kamil Kanclerz, Karen Livescu, Karl Krauth, Karthik Gopalakrishnan, Katerina Ignatyeva, Katja Markert, Kaustubh D. Dhole, Kevin Gimpel, Kevin Omondi, Kory Wallace Mathewson, Kristen Chiafullo, Ksenia Shkaruta, Kumar Shridhar, Kyle McDonell, Kyle Richardson, Laria Reynolds, Leo Gao, Li Zhang, Liam Dugan, Lianhui Qin, Lidia Contreras-Ochando, Louis-Philippe Morency, Luca Moschella, Luca Lam, Lucy Noble, Ludwig Schmidt, Luheng He, Luis Oliveros Colón, Luke Metz, Lütfi Kerem Şenel, Maarten Bosma, Maarten Sap, Maartje ter Hoeve, Maheen Farooqi, Manaal Faruqui, Mantas Mazeika, Marco Baturan, Marco Marelli, Marco Maru, Maria Jose Ramírez Quintana, Marie Tolkiehn, Mario Giulianelli, Martha Lewis, Martin Potthast, Matthew L. Leavitt, Matthias Hagen, Matyas Schubert, Medina Baitemirova, Melody Arnaud, Melvin McElrath, Michael A. Yee, Michael Cohen, Michael Gu, Michael Ivanitskiy, Michael Starritt, Michael Strube, Michał Swędrowski, Michele Bevilacqua, Michihiro Yasunaga, Mihir Kale, Mike Cain, Mimee Xu, Mirac Suzgun, Mitch Walker, Monica Tiwari, Mohit Bansal, Moin Aminnaseri, Mor Geva, Mozhdeh Gheini, T. Mukund Varma, Nanyun Peng, Nathan A. Chi, Nayeon Lee, Neta Gur-Ari Krakover, Nicholas Cameron, Nicholas Roberts, Nick Doiron, Nicole Martinez, Nikita Nangia, Niklas Deckers, Niklas Muennighoff, Nitish Shirish Keskar, Niveditha Iyer, Noah Constant, Noah Fiedel, Nuan Wen, Oliver Zhang, Omar Agha, Omar Elbaghdadi, Omer Levy, Owain Evans, Pablo Antonio Moreno Casares, Parth Doshi, Pascale Fung, Paul Pu Liang, Paul Vicol, Pegah Alipoormolabashi, Peiyuan Liao, Percy Liang, Peter Chang, Peter Eckersley, Phu Mon Htut, Pinyu Hwang, P. Miłkowski, Piyush S. Patil, Pouya Pezeshkpour, Priti Oli, Qiaozhu Mei, Qing Lyu, Qinlang Chen, Rabin Banjade, Rachel Etta Rudolph, Raefer Gabriel, Rahel Habacker, Ramon Risco, Raphaël Millière, Rhythm Garg, Richard Barnes, Rif A. Saurous, Riku Arakawa, Robbe Raymaekers, Robert Frank, Rohan Sikand, Roman Novak, Roman Sitelew, Ronan Le Bras, Rosanne Liu, Rowan Jacobs, Rui Zhang, Ruslan Salakhutdinov, Ryan Chi, Ryan Lee, Ryan Stovall, Ryan Teehan, Ryan Yang, Sahib Singh, Saif Mohammad, Sajant Anand, Sam Dillavou, Sam Shleifer, Sam Wiseman, Samuel Gruetter, Samuel R. Bowman, Samuel S. Schoenholz, Sanghyun Han, Sanjeev Kwatra, Sarah A. Rous, Sarik Ghazarian, Sayan Ghosh, Sean Casey, Sebastian Bischoff, Sebastian Gehrmann, Sebastian Schuster, Sepideh Sadeghi, Shadi S. Hamdan, Sharon Zhou, Shashank Srivastava, Sherry Shi, Shikhar Singh, Shima Asaadi, Shixiang Shane Gu, Shubh Pachchigar, Shubham Toshniwal, Shyam Upadhyay, Shyamolima Debnath, Siamak Shakeri, Simon Thormeyer, Simone Melzi, Siva Reddy, Sneha Priscilla Makini, Soo-Hwan Lee, Spencer Bradley Torene, Sriharsha Hatwar, Stanislas Dehaene, Stefan Divic, Stefano Ermon, Stella Biderman, Stephanie Lin, Stephen Prasad, Steven T. Piantadosi, Stuart M. Shieber, Summer Misherghi, Svetlana Kiritchenko, Swaroop Mishra, Tal Linzen, Tal Schuster, Tao Li, Tao Yu, Tariq Ali, Tatsunori Hashimoto, Te-Lin Wu, Theo Desbordes, Theodore Rothschild, Thomas Phan, Tianle Wang, Tiberius Nkinyili, Timo Schick, Timofei Kornev, Titus Tunduny, Tobias Gerstenberg, Trenton Chang, Trishala Neeraj, Tushar Khot, Tyler Shultz, Uri Shaham, Vedant Misra, Vera Demberg, Victoria Nyamai, Vikas Raunak, Vinay Venkatesh Ramasesh, Vinay Uday Prabhu, Vishakh Padmakumar, Vivek Srikumar, William Fedus, William Saunders, William Zhang, Wout Vossen, Xiang Ren, Xiaoyu Tong, Xinran Zhao, Xinyi Wu, Xudong Shen, Yadollah Yaghoobzadeh, Yair Lakretz, Yangqiu Song, Yasaman Bahri, Yejin Choi, Yichi Yang, Yiding Hao, Yifu Chen, Yonatan Belinkov, Yu Hou, Yufang Hou, Yuntao Bai, Zachary Seid, Zhuoye Zhao, Zijian Wang, Zijie J. Wang, Zirui Wang, and Ziyi Wu. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. ArXiv, abs/2206.04615, 2022. URL https://api.semanticscholar.org/CorpusID:263625818.
320
+
321
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed H. Chi, F. Xia, Quoc Le, and Denny Zhou. Chain of thought prompting elicits reasoning in large language models. ArXiv, abs/2201.11903, 2022. URL https://api.semanticscholar.org/CorpusID:246411621.
322
+
323
+ Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. 2024. URL https://api.semanticscholar.org/CorpusID:271601023.
324
+
325
+ An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.
326
+ Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for llm reasoning. ArXiv, abs/2502.18080, 2025. URL https://api.semanticscholar.org/CorpusID:276580856.
327
+ Zishun Yu, Tengyu Xu, Di Jin, Karthik Abinav Sankararaman, Yun He, Wenxuan Zhou, Zhouhao Zeng, Eryk Helenowski, Chen Zhu, Si-Yuan Wang, Hao Ma, and Han Fang. Think smarter not harder: Adaptive reasoning with inference aware optimization. ArXiv, abs/2501.17974, 2025. URL https://api.semanticscholar.org/CorpusID:275994017.
328
+ Kaijie Zhu, Jiaao Chen, Jindong Wang, Neil Zhenqiang Gong, Diyi Yang, and Xing Xie. Dyval: Dynamic evaluation of large language models for reasoning tasks. arXiv preprint arXiv:2309.17167, 2023.
329
+
330
+ # A Appendix
331
+
332
+ # A.1 Additional DUMB500 dataset details
333
+
334
+ The dataset is categorized into four subsets, each containing multiple fine-grained categories:
335
+
336
+ # Mathematics (Math)
337
+
338
+ - Arithmetic: Addition, Subtraction, Multiplication, Division
339
+ - Comparison: Greater/Less than relationships
340
+ - Fractions & Percentages: Simple fraction and percentage comparisons
341
+ - Exponents & Roots: Squaring and square roots
342
+ - Unit Conversion: Basic metric conversions
343
+ - Patterns & Sequences: Identifying missing numbers in sequences
344
+ - Geometry: Recognizing shapes, angles, and basic geometric properties
345
+ - Logical Reasoning: Basic problem-solving using logic
346
+
347
+ # Conversational Interaction (Chats)
348
+
349
+ - Self-reflective: Questions involving introspection and emotional states
350
+ - Acknowledgment: Checking system responsiveness (e.g., "Can you see this?")
351
+ - Greetings & Casual Chat: Common greetings and informal small talk
352
+ - Commonsense Reasoning: Fundamental knowledge about the physical world (e.g., "Is water wet?")
353
+ - Object Interaction: Simple cause-effect relationships (e.g., "If I drop my phone, will it fall?")
354
+ - General Knowledge: Basic factual questions (e.g., "What is the capital of China?")
355
+
356
+ # Programming & Computing (Code)
357
+
358
+ - Basic Output: Printing text and numbers
359
+ - Variables & Data Types: Assigning and manipulating variables (numbers, strings)
360
+ - Mathematical Operations: Performing basic calculations in code
361
+ - User Input Handling: Handling user input in simple programs
362
+ - Conditional Statements: Basic if-else logic and checking conditions
363
+ - Loops & Iteration: Simple loops for repeated tasks
364
+ - Data Structures: Lists, dictionaries, sets, tuples
365
+ - Randomization: Generating random numbers and selections
366
+
367
+ # Task Execution (Tasks)
368
+
369
+ - Communication & Writing: Emails, Messages, Creative Writing, Social Media, Daily-life tasks
370
+ - Language & Text Processing: Paraphrasing, Translation, Sentence Transformations, Grammar Checking
371
+ - Analogy & Concept Matching: Identifying similar concepts and words
372
+
373
+ # A.2 DUMB500 Evaluation Rubrics
374
+
375
+ Each section below lists the requirements checked by the LM judge when scoring TASK and CHAT answers in DUMB500. The score for a given answer is the fraction of requirements judged "yes".
376
+
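+ A minimal sketch of this scoring rule, assuming a generic yes/no LM-judge callable (the `judge` interface below is hypothetical, not DUMB500's released code):
+
+ ```python
+ from typing import Callable, List
+
+ def rubric_score(answer: str, requirements: List[str],
+                  judge: Callable[[str, str], str]) -> float:
+     """Score an answer as the fraction of rubric requirements judged 'yes'."""
+     verdicts = [judge(answer, req).strip().lower() for req in requirements]
+     return sum(v.startswith("yes") for v in verdicts) / len(verdicts)
+ ```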
377
+ # A.2.1 General Requirements
378
+
379
+ - Accuracy: Information must be correct and complete: "Does the response include all essential information requested?"
380
+ - Conciseness: Avoid unnecessary elaboration: "Does the response avoid unnecessary explanations and get straight to the point?"
381
+
382
+ # A.2.2 Task Rubrics
383
+
384
+ # Emails
385
+
386
+ - Formality Appropriateness: Level of formality must match context: "Is the level of formality appropriate for the context?"
387
+ - Example Question-Specific: For "Write a short email to Alice confirming a meeting at $3\mathrm{pm}$ ":
388
+
389
+ - "Is the email addressed to Alice?"
390
+ - "Does the email mention a meeting at 3PM?"
391
+
392
+ # Messages
393
+
394
+ - Tone Appropriateness: Must suit messaging context: "Is the tone suitable for the messaging context?"
395
+ - Format: Must be formatted as a text message: "Is the response formatted as a text message?"
396
+
397
+ # Paraphrasing
398
+
399
+ - Style Appropriateness: Must match requested style/tone: "Does the paraphrase match the requested style/tone?"
400
+ - Example Question-Specific: For "Make formal invitation casual":
401
+
402
+ - "Does the message instruct to RSVP by Thursday?"
403
+ - "Is the email addressed to colleagues?"
404
+
405
+ # Translation
406
+
407
+ - Accuracy: Must provide correct translation: "Is the translation correct?"
408
+ - Example Question-Specific: For "Translate to French":
409
+
410
+ - "Does the sentence closely resemble: J'aime dire des livres pendant mon temps libre?"
411
+
412
+ # Words
413
+
414
+ - Relevance: Words must fit request context: "Are the provided words relevant to the request?"
415
+ - Contextual Appropriateness: Words must suit intended use: "Are the words appropriate for the context?"
416
+
417
+ # Creative-Writing
418
+
419
+ - Contextual Appropriateness: Must match specific context: "Does the response match the specific context of the creative writing task?"
420
+ - Length Requirements: Must follow specified length: "Does the response follow the length requirement if there's one?"
421
+
422
+ # Social-Media
423
+
424
+ - Platform Appropriateness: Must match platform conventions: "Does the content match the conventions of the specified platform?"
425
+ - Example Question-Specific: For "LinkedIn new job post":
426
+
427
+ "Does the post mention the job title and company?"
428
+
429
+ # Work
430
+
431
+ - Formality Appropriateness: Must match workplace context: "Does the response contain the correct format as required?"
432
+ - Example Question-Specific: For "Slack message to manager":
433
+
434
+ - "Does the message respectfully address the manager?"
435
+ - "Does the message omit names?"
436
+
437
+ # A.2.3 Chat Rubrics
438
+
439
+ # Self-reflective
440
+
441
+ - Friendliness: Must show politeness: "Does the response show friendliness and politeness?"
442
+
443
+ # Acknowledgment
444
+
445
+ - Conciseness: Avoid overthinking simple queries: "Does the response avoid overthinking the intent behind simple queries?"
446
+
447
+ # Greetings
448
+
449
+ - Contextual Appropriateness: Must sound natural: "Does the greeting sound natural and human-like?"
450
+
451
+ # Daily-Chats
452
+
453
+ - Contextual Appropriateness: Must suit casual conversation: "Is the response appropriate for casual conversation?"
454
+
455
+ # Commonsense
456
+
457
+ - Conciseness: Avoid overthinking obvious answers: "Does the response avoid overthinking obvious answers?"
458
+
459
+ # Knowledge
460
+
461
+ - Conciseness: Share knowledge without excessive detail: "Is the knowledge shared without excessive detail?"
462
+
463
+ # A.3 Additional THOUGHTTERMINATOR details
464
+
465
+ # A.3.1 THOUGHTTERMINATOR component prompts
466
+
467
+ # Scheduling prompt:
468
+
469
+ Please generate an answer to the following question in {deadline} tokens: {prompt}. Messages of remaining time will be given as messages enclosed in <System></System> tags. Please provide your answer as **Answer:** or **Final Answer:** when complete.
470
+
471
+ # Interrupt prompt:
472
+
473
+ I have used {elapsed} tokens, and I have {remaining} tokens left to answer. To continue:
474
+
475
+ # Terminator prompt:
476
+
477
+ I'm out of time, I need to provide my final answer now, considering what I have computed so far. **Final Answer:**
478
+
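+ Read together, the three prompts define a simple deadline protocol: announce a token budget up front, interleave elapsed/remaining reminders during decoding, and force a final answer once the budget is spent. Below is a minimal sketch of how they might be wired together; the chunk size, the generic `generate(text, max_new_tokens)` completion callable, and the choice to append the terminator as a forced continuation are illustrative assumptions, not the paper's implementation.
+
+ ```python
+ SCHEDULE = ("Please generate an answer to the following question in "
+             "{deadline} tokens: {prompt}. Messages of remaining time will be "
+             "given as messages enclosed in <System></System> tags. Please "
+             "provide your answer as **Answer:** or **Final Answer:** when "
+             "complete.")
+ INTERRUPT = ("<System>I have used {elapsed} tokens, and I have {remaining} "
+              "tokens left to answer. To continue:</System>")
+ TERMINATE = ("I'm out of time, I need to provide my final answer now, "
+              "considering what I have computed so far. **Final Answer:**")
+
+ def terminated_decode(question, deadline, generate, chunk=256):
+     # `generate(text, max_new_tokens)` is a stand-in for any LM completion call.
+     text = SCHEDULE.format(deadline=deadline, prompt=question)
+     used = 0
+     while used < deadline:
+         step = min(chunk, deadline - used)
+         out = generate(text, max_new_tokens=step)
+         text += out
+         used += step
+         if "Answer:" in out:  # covers both **Answer:** and **Final Answer:**
+             return text
+         text += INTERRUPT.format(elapsed=used, remaining=deadline - used)
+     text += TERMINATE  # budget exhausted: force the final answer
+     return text + generate(text, max_new_tokens=64)
+ ```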
479
+ # A.4 Supplementary Results
480
+
481
+ <table><tr><td>Setting</td><td>Acc.</td><td>Pass@5</td><td>Pass@10</td><td>Tokens</td></tr><tr><td colspan="5">MATH500</td></tr><tr><td>Base</td><td>0.47</td><td>0.78</td><td>0.81</td><td>3015</td></tr><tr><td>Naïve</td><td>0.52</td><td>0.78</td><td>0.82</td><td>1938</td></tr><tr><td>THOUGHTTERMINATOR</td><td>0.48</td><td>0.81</td><td>0.87</td><td>1590</td></tr><tr><td colspan="5">Zebra-logic</td></tr><tr><td>Base</td><td>0.03</td><td>0.095</td><td>0.135</td><td>3861</td></tr><tr><td>Naïve</td><td>0.22</td><td>0.575</td><td>0.755</td><td>1254</td></tr><tr><td>THOUGHTTERMINATOR</td><td>0.19</td><td>0.585</td><td>0.75</td><td>1368</td></tr><tr><td colspan="5">GPQA</td></tr><tr><td>Base</td><td>0.15</td><td>0.4096</td><td>0.5783</td><td>2815</td></tr><tr><td>Naïve</td><td>0.20</td><td>0.5783</td><td>0.7470</td><td>922</td></tr><tr><td>THOUGHTTERMINATOR</td><td>0.21</td><td>0.5542</td><td>0.7470</td><td>1279</td></tr><tr><td colspan="5">DUMB500</td></tr><tr><td>Base</td><td>0.58</td><td>0.9646</td><td>0.9735</td><td>3570</td></tr><tr><td>Naïve</td><td>0.37</td><td>0.7385</td><td>0.8154</td><td>377</td></tr><tr><td>THOUGHTTERMINATOR</td><td>0.67</td><td>0.9610</td><td>0.9610</td><td>447</td></tr></table>
482
+
483
+ Table 3: Comparison of performance and token spend of R1-1.5B under the Base Setting, with Naïve, and with THOUGHTTERMINATOR.
484
+
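+ The Pass@5 and Pass@10 columns above follow the usual pass@k convention. The appendix does not restate the estimator; the standard unbiased estimator of Chen et al. (2021) is the common choice, sketched here for concreteness (whether the paper uses this exact formula is an assumption):
+
+ ```python
+ from math import comb
+
+ def pass_at_k(n: int, c: int, k: int) -> float:
+     """With c of n samples correct, the probability that at least one of
+     k randomly drawn samples is correct: 1 - C(n-c, k) / C(n, k)."""
+     if n - c < k:
+         return 1.0  # every size-k draw must contain a correct sample
+     return 1.0 - comb(n - c, k) / comb(n, k)
+ ```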
485
+ <table><tr><td>Model</td><td>Head only</td><td>Tail only</td><td>Head &amp; Tail</td><td>Tokens</td></tr><tr><td colspan="5">Non-reasoning language models</td></tr><tr><td>Qwen2-7B-Instruct</td><td>0.77</td><td>0.73</td><td>0.76</td><td>923</td></tr><tr><td>Llama-3.2-1B-Instruct</td><td>0.53</td><td>0.53</td><td>0.53</td><td>955</td></tr><tr><td>Llama-3.2-3B-Instruct</td><td>0.54</td><td>0.54</td><td>0.55</td><td>2069</td></tr><tr><td>Llama-3.1-8B-Instruct</td><td>0.48</td><td>0.41</td><td>0.49</td><td>9402</td></tr><tr><td>gemma-2-2b-it</td><td>0.90</td><td>0.90</td><td>0.90</td><td>73</td></tr><tr><td>gemma-2-9b-it</td><td>0.93</td><td>0.93</td><td>0.93</td><td>64</td></tr><tr><td>gemma-2-27b-it</td><td>0.76</td><td>0.76</td><td>0.76</td><td>96</td></tr><tr><td>deepseek-llm-7b-chat</td><td>0.61</td><td>0.60</td><td>0.61</td><td>314</td></tr><tr><td colspan="5">Reasoning language models</td></tr><tr><td>QwQ-32B-Preview</td><td>0.72</td><td>0.66</td><td>0.71</td><td>1774</td></tr><tr><td>QwQ-32B</td><td>0.70</td><td>0.49</td><td>0.67</td><td>6712</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-1.5B</td><td>0.59</td><td>0.58</td><td>0.58</td><td>3570</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-7B</td><td>0.68</td><td>0.66</td><td>0.67</td><td>2042</td></tr><tr><td>DeepSeek-R1-Distill-Llama-8B</td><td>0.80</td><td>0.80</td><td>0.80</td><td>2053</td></tr></table>
486
+
487
+ Table 4: Accuracy and token usage across different models under different input truncation settings.
488
+
489
+ ![](images/fb9b1849b069a7edf1d21ec778b6979d82f1d253fbd649b31376d019aad7e044.jpg)
490
+ Figure 8: Pearson correlation of accuracies across different models on the MATH500 dataset
491
+
492
+ ![](images/4b0fcdcdff33836130a2a935b866fde64fff26901df853096f493aa05ca757b6.jpg)
493
+ Figure 9: Pearson correlation of accuracies across different models on the GPQA dataset
494
+
495
+ ![](images/dfdb6af3d2ca6c4f8f6bfc981c5b902e3a8630f58dc77711a06b10546d9fe515.jpg)
496
+ Figure 10: Pearson correlation of accuracies across different models on the Zebra dataset
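+
+ The quantity plotted in Figures 8-10 can be reproduced from per-question accuracy vectors; a sketch follows, where `acc` maps each model name to a 0/1 vector over the benchmark's questions (a hypothetical data layout; the figures' exact preprocessing is assumed, not documented here).
+
+ ```python
+ import numpy as np
+
+ def accuracy_correlations(acc: dict) -> np.ndarray:
+     """Pairwise Pearson correlation between models' per-question accuracies."""
+     names = sorted(acc)                          # fix a model ordering
+     matrix = np.stack([acc[n] for n in names])   # shape: (models, questions)
+     return np.corrcoef(matrix)                   # rows are treated as variables
+ ```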
data/2025/2504_13xxx/2504.13367/images/0866efb5f2950f68686baa975f2bd71c9f7d2b8a511fd667d849d97f15d4342b.jpg ADDED

Git LFS Details

  • SHA256: 7f06b540ddd476c3baace1bfbe32905bdcad3f5f84768b1ed92d2c9f839a436c
  • Pointer size: 130 Bytes
  • Size of remote file: 87.8 kB
data/2025/2504_13xxx/2504.13367/images/13527b3c58070716414b69e0269d27b00d185a55720e3870c1e52b0bb124ad8e.jpg ADDED

Git LFS Details

  • SHA256: ada233d56d2eb31883c3c59bc44f6dabcc310c30084f1aecd014e73ca83debd1
  • Pointer size: 129 Bytes
  • Size of remote file: 2.29 kB
data/2025/2504_13xxx/2504.13367/images/1fdea51374a5af38e976d087dd52dada79a30f34c4e95db72a81d13f280ac896.jpg ADDED

Git LFS Details

  • SHA256: f825a1c00f4d6f8fceed39d1970f713a8ff6a5b1f32493403b89b1ef41a6faec
  • Pointer size: 130 Bytes
  • Size of remote file: 10.8 kB
data/2025/2504_13xxx/2504.13367/images/30724602ecef27f0168d78aa0666aa13581e228b0798a45453395a9c78407762.jpg ADDED

Git LFS Details

  • SHA256: b1211fe7a8633becbc65f4def6121d0be3219e21574cbc7d2f5085b78868c75d
  • Pointer size: 130 Bytes
  • Size of remote file: 10.6 kB
data/2025/2504_13xxx/2504.13367/images/352e4f94f22f607ff8f695359842e116f03471cb231b1b06f92cb664ff578a85.jpg ADDED

Git LFS Details

  • SHA256: a25e0044221953545d24647b4b3285336a2a58fe053aee420279fab3e3402d5d
  • Pointer size: 129 Bytes
  • Size of remote file: 2.29 kB
data/2025/2504_13xxx/2504.13367/images/4a071855232fa6e57dbb04e6030e3559d5f9ce16318980b3f25950ab21835b1c.jpg ADDED

Git LFS Details

  • SHA256: 0facab4b15c8c4ae1b68c9c143a57925902d4d3933afc3e2266039a3136e3ce1
  • Pointer size: 130 Bytes
  • Size of remote file: 25.4 kB
data/2025/2504_13xxx/2504.13367/images/4b0fcdcdff33836130a2a935b866fde64fff26901df853096f493aa05ca757b6.jpg ADDED

Git LFS Details

  • SHA256: c7b6c9009eef1ff3f09ed14bec158bb4f4b54932543a9cb53b3edb0c72157d4e
  • Pointer size: 130 Bytes
  • Size of remote file: 99.9 kB
data/2025/2504_13xxx/2504.13367/images/538aa8e65541ed0336c847201d371c2296709a4338ae65ae25bf71ae082642dc.jpg ADDED

Git LFS Details

  • SHA256: fadccfd6414fe978f0597bf592ff6f1d5039f396b62e7eae7b74807254139fce
  • Pointer size: 129 Bytes
  • Size of remote file: 2.33 kB
data/2025/2504_13xxx/2504.13367/images/5432888fe6ad055a4a2f36e42336210e300e160a20f797c9a35f05da657395b4.jpg ADDED

Git LFS Details

  • SHA256: 1ee5ab6b0ab924e85a71c5d3e00cda36850fc46fd40494c30dfcb4ed1d74ad1e
  • Pointer size: 130 Bytes
  • Size of remote file: 23.5 kB
data/2025/2504_13xxx/2504.13367/images/5627bb5640e02f7267b21db83dc4183b1113146a8b00c31d9b9a477cafe2a540.jpg ADDED

Git LFS Details

  • SHA256: ddc8ed7c5c78f656f247ef58f0eb5fe69a9b22ebc1a62cbe09ae69fd41906f7d
  • Pointer size: 130 Bytes
  • Size of remote file: 13.8 kB
data/2025/2504_13xxx/2504.13367/images/57762d1e60e652fd131f3e09e77d78a77ae5436d64a718769ad214c606cb7b17.jpg ADDED

Git LFS Details

  • SHA256: 41e80928a7c4b4eae83e06b5f00123b4b8d150c0cf834c634d9b933cd9189357
  • Pointer size: 130 Bytes
  • Size of remote file: 10.6 kB
data/2025/2504_13xxx/2504.13367/images/581acd48568aa80c096caa667172e0fe4c3edfd17e5ec1c57c1612fbadf7a882.jpg ADDED

Git LFS Details

  • SHA256: 90247f47bacbfa36d3b5d1966a79f25a5262d4acecc02f0846c4b0cf6424d91a
  • Pointer size: 129 Bytes
  • Size of remote file: 2.38 kB
data/2025/2504_13xxx/2504.13367/images/59286ebd295a2115c6db701e88015e6a75246117891f5cdd7adecace969a81f0.jpg ADDED

Git LFS Details

  • SHA256: 17e04cecae421b828a62011c3b346e438454f02bf1186842afc2d76b8d18309d
  • Pointer size: 129 Bytes
  • Size of remote file: 8.23 kB
data/2025/2504_13xxx/2504.13367/images/5a035e480b4f48c7dedaadcca1309d1e06a7bde2d6b26423a9e2ecd7eb4a2adb.jpg ADDED

Git LFS Details

  • SHA256: c8aa8d8a42b4cf798f66bc8f16eb768fe6f51d4fb3e79b05a3df6e5476782dcb
  • Pointer size: 129 Bytes
  • Size of remote file: 9.04 kB
data/2025/2504_13xxx/2504.13367/images/6700178cb0c9508c948988b611b46ce8b0c8317c142f6d52afd4180f3a6d8158.jpg ADDED

Git LFS Details

  • SHA256: c5fc76c3963e91c8c9efe93ba7bd1c0bf2ecc0cac340eaf8ae452fa4e7b3a30f
  • Pointer size: 130 Bytes
  • Size of remote file: 10.2 kB
data/2025/2504_13xxx/2504.13367/images/682c4a69257fd7dd81c194606163169380868bc470ed364ffdf64c1588251b44.jpg ADDED

Git LFS Details

  • SHA256: 4a7b6592e3f6e071efb3c6a96ca0fec067867556c933e1fe797d63cf235e1777
  • Pointer size: 130 Bytes
  • Size of remote file: 16.9 kB
data/2025/2504_13xxx/2504.13367/images/72e35b4c40e40737d9860dacffe653edfe5e70af9b416602ff380f5cb1ee5ca4.jpg ADDED

Git LFS Details

  • SHA256: 3920b1722b7dedcd1e864c70dcd44c2666db75a2aa4a042f51ce22ac5ba00e79
  • Pointer size: 130 Bytes
  • Size of remote file: 12.9 kB
data/2025/2504_13xxx/2504.13367/images/759e54dcb199c1868fd8ab257be18fc25efe607549f01a548c1499fa91e96d72.jpg ADDED

Git LFS Details

  • SHA256: 61e00572fc02e7ece1a265a0c209c8f1f1890c0bf2fb1fa5a0e16d8012651e38
  • Pointer size: 130 Bytes
  • Size of remote file: 10.6 kB
data/2025/2504_13xxx/2504.13367/images/811d4bc555cff8efb45585028fa7f9cb6319de931fd55724b0e10f068a0e20f7.jpg ADDED

Git LFS Details

  • SHA256: 7cfdf2d7cc091836f67946ca10ad5a3edee697f56b93a0f13aeb951af0fb738f
  • Pointer size: 129 Bytes
  • Size of remote file: 2.19 kB
data/2025/2504_13xxx/2504.13367/images/87364e66d83fbfd29ef9111af5d78b99b22d3af1bd107989f236b69456627d79.jpg ADDED

Git LFS Details

  • SHA256: 142a4c36989e6c3656367c5b39305c6058242744af2d6c39432181cd2e63071b
  • Pointer size: 129 Bytes
  • Size of remote file: 2.24 kB
data/2025/2504_13xxx/2504.13367/images/89899d90f80f980839663034641a61668a0142bcfe1e2542c837d3074ea4dd84.jpg ADDED

Git LFS Details

  • SHA256: 1c56105d8fd73a76b100761a733c5f2e0a93428bed36a28a490d86e18bfa8730
  • Pointer size: 129 Bytes
  • Size of remote file: 9.68 kB
data/2025/2504_13xxx/2504.13367/images/8e21f1e860839d4ea22a7075d2360b946e6269ff7810d2faf192f17d790a918f.jpg ADDED

Git LFS Details

  • SHA256: ec00b780895fa88a57697287e65d86f95e07181567c11c1a292dd3522370421d
  • Pointer size: 130 Bytes
  • Size of remote file: 10.3 kB
data/2025/2504_13xxx/2504.13367/images/90e3ee2c43c84fd9252311bc8fc314f8fe11b34af70ffe25a6af3e71daa9c87c.jpg ADDED

Git LFS Details

  • SHA256: 3f28c18b33cbeb215f27d2283f901c0922a7198c56534fe25cd6eeaeafd89ce2
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
data/2025/2504_13xxx/2504.13367/images/92e86fd9ceb7f94163041bf0cb974852de5485b8ade891ce3d1a7a653157e62a.jpg ADDED

Git LFS Details

  • SHA256: 77326fde6e3ee6ff20f8490afb01a58005acb36aa244e743a4d9dadb1aeb8e72
  • Pointer size: 130 Bytes
  • Size of remote file: 10.1 kB
data/2025/2504_13xxx/2504.13367/images/99165ac369c58d493982eab1ef883be89d4dfbe8888c47c5c9d898fe2c7be8f4.jpg ADDED

Git LFS Details

  • SHA256: 20a1f7b36d77f7d40185ff6d20d9d719ffe0a93448c20e36a1b3087dfdbabfd1
  • Pointer size: 130 Bytes
  • Size of remote file: 50.1 kB
data/2025/2504_13xxx/2504.13367/images/a069990096b936224abe029895e3f13e9a6b77ab66aefc1078003d0019c494be.jpg ADDED

Git LFS Details

  • SHA256: 701046106f3b569b48d024c65bf5b565a37a867c726a5e2bc631e9545e3764c2
  • Pointer size: 130 Bytes
  • Size of remote file: 73.8 kB
data/2025/2504_13xxx/2504.13367/images/a0e2646633e86a7c143335f4239d06298e073b34198d636c80a6bebb400d4c1e.jpg ADDED

Git LFS Details

  • SHA256: 8ce5330dc4d233da88b8d9e8c606dcd4af6961a342c130bb7a3970883d23e1d1
  • Pointer size: 130 Bytes
  • Size of remote file: 10.4 kB
data/2025/2504_13xxx/2504.13367/images/a39233c56e1bc88f704df64f0f6df4fed29ff764eb0ee298d3320f9a485427ca.jpg ADDED

Git LFS Details

  • SHA256: 25b25075d125434fd057835dc7f3f2572ed238b04799648606c2c067cb3f5336
  • Pointer size: 130 Bytes
  • Size of remote file: 10.1 kB
data/2025/2504_13xxx/2504.13367/images/b7a12f3ebd5be5d280f2d282c420536095e032f04755419c808ae068d4b326c8.jpg ADDED

Git LFS Details

  • SHA256: 807e1bb4d9211486c2931453bd0480dd5586fb2fccb26c69958c598a3194b1e7
  • Pointer size: 129 Bytes
  • Size of remote file: 2.04 kB
data/2025/2504_13xxx/2504.13367/images/c3d399dab9ecdfa2681a845b936a04f943ab15aa7fbd7fcbbc3b517f8c1c318e.jpg ADDED

Git LFS Details

  • SHA256: dfba6f8df923bbb5ed86bb51a629bc8ec537df9687fcd8164e89790880198bf5
  • Pointer size: 130 Bytes
  • Size of remote file: 10.1 kB
data/2025/2504_13xxx/2504.13367/images/c71bfc7f709c37fe7ea407702dfdf8bf2978a601cc473853b26a0955de2d0629.jpg ADDED

Git LFS Details

  • SHA256: f4f3a9becf8391b8bfe094bc53915696a132d90ce3da24ce9118ea171c41fef4
  • Pointer size: 129 Bytes
  • Size of remote file: 2.35 kB
data/2025/2504_13xxx/2504.13367/images/d7f678edad861cb21d30f22416b08ac0469573f0fc0e10be631d47bf4e7d63a4.jpg ADDED

Git LFS Details

  • SHA256: 101bedade34a718f37cb88a3dcb8024594a9e009a1ba72e398bf13c18bb5d759
  • Pointer size: 129 Bytes
  • Size of remote file: 2.29 kB
data/2025/2504_13xxx/2504.13367/images/ddca224efe50d1837f1143603cc77049dea6f0e73de6b9b754b90c3fe2772c4b.jpg ADDED

Git LFS Details

  • SHA256: f9a0ae4f1e8048ad0b719886836e2aa28931dd3e3f96559c12c69c408a5224f9
  • Pointer size: 129 Bytes
  • Size of remote file: 7.93 kB
data/2025/2504_13xxx/2504.13367/images/dee4ed60354ef72a6680045f881545b825ea445cb6273f9f580a4f803fb4fd33.jpg ADDED

Git LFS Details

  • SHA256: bb54d696b8060a728aa5646cc302e635f1c437b1998e990fbc89e80f68d3ad32
  • Pointer size: 130 Bytes
  • Size of remote file: 10.4 kB
data/2025/2504_13xxx/2504.13367/images/df13ded6d9154f47a91496e336a31a618bb28449dbf7c3458660c781ce2b3cbb.jpg ADDED

Git LFS Details

  • SHA256: 73f8f376a84b3dcba01b20d70ed9a4dc6fd84eb53c182bba196b6150878cb155
  • Pointer size: 129 Bytes
  • Size of remote file: 2.55 kB
data/2025/2504_13xxx/2504.13367/images/df96533c806e323da66fec49e0b56d5b2dace76596c95829ff5e4090b9f55a9c.jpg ADDED

Git LFS Details

  • SHA256: 22cf3ed4927a420f6ddca60531b44f9c9973bcf216f79fcb0ad1c384e1406c7b
  • Pointer size: 129 Bytes
  • Size of remote file: 2.42 kB
data/2025/2504_13xxx/2504.13367/images/dfdb6af3d2ca6c4f8f6bfc981c5b902e3a8630f58dc77711a06b10546d9fe515.jpg ADDED

Git LFS Details

  • SHA256: 880f7b093389ff7bf8b624d09a1ad0e613bcd4efb4d9ce5ef9e698349f03cfeb
  • Pointer size: 131 Bytes
  • Size of remote file: 101 kB
data/2025/2504_13xxx/2504.13367/images/e1ba5e633e285ccab04d1c816d0cb6775a0ca8921c5a15bbdc4c91e69ae2d042.jpg ADDED

Git LFS Details

  • SHA256: afb5e350afdac418d3e016eb14b15412a7e9d484dd2ca5423c1fcf95310ba355
  • Pointer size: 130 Bytes
  • Size of remote file: 10.8 kB
data/2025/2504_13xxx/2504.13367/images/e1d81fb4d67977d5ae0b112741cee7bd473e8b3fee75518a8801716d043709f9.jpg ADDED

Git LFS Details

  • SHA256: 5e359be1452d34788c75da5a1efa5987113c1b8ef9132362ebfd1c7d574c3f98
  • Pointer size: 129 Bytes
  • Size of remote file: 8.1 kB
data/2025/2504_13xxx/2504.13367/images/f3af9c4616ec29961937b3d6f7fac9ec81c7c1da8057695da4898fe6d0ee3661.jpg ADDED

Git LFS Details

  • SHA256: 06265370af366616d1c851157022a6ae916a67210f18881a68acaf536ba1b3d4
  • Pointer size: 130 Bytes
  • Size of remote file: 15.9 kB
data/2025/2504_13xxx/2504.13367/images/fb9b1849b069a7edf1d21ec778b6979d82f1d253fbd649b31376d019aad7e044.jpg ADDED

Git LFS Details

  • SHA256: c814a20f5a2aec02153d8aafd37ea856667894e281e7b0a7ffb1301ad9272f74
  • Pointer size: 130 Bytes
  • Size of remote file: 98.4 kB
data/2025/2504_13xxx/2504.13367/images/fc16d8aaca46c41ee51cad243c325e51becff01a305939429711c020680e223b.jpg ADDED

Git LFS Details

  • SHA256: 442ecc568ec61ab8c7f77fb0df8b70c46a3bb0b26e67a32019e30a2a5dcdac0d
  • Pointer size: 130 Bytes
  • Size of remote file: 95.1 kB
data/2025/2504_13xxx/2504.13367/images/fc31f37b8bec8b05e32f03990a4b59591c248971304d9e6cc93b1bb1a73fe0c5.jpg ADDED

Git LFS Details

  • SHA256: 5c37e9401fba8e6538e26a10c79abc9fb2a762d2cf7a612d2679254661530285
  • Pointer size: 130 Bytes
  • Size of remote file: 11.7 kB
data/2025/2504_13xxx/2504.13367/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_13xxx/2504.13406/2cedbc56-01e1-47be-9173-a7dd756bcc1a_content_list.json ADDED
@@ -0,0 +1,1873 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "LangCoop: Collaborative Driving with Language",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 248,
8
+ 130,
9
+ 750,
10
+ 152
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Xiangbo Gao $^{1}$ , Yuheng Wu $^{2}$ , Rujia Wang $^{1}$ , Chenxi Liu $^{3}$ , Yang Zhou $^{1}$ , Zhengzhong Tu $^{1*}$ , $^{1}$ Texas A&M University, $^{2}$ KAIST, $^{3}$ University of Utah",
17
+ "bbox": [
18
+ 151,
19
+ 179,
20
+ 843,
21
+ 220
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "{xiangbog,tzz}@tamu.edu",
28
+ "bbox": [
29
+ 393,
30
+ 222,
31
+ 602,
32
+ 237
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "* Corresponding author",
39
+ "bbox": [
40
+ 426,
41
+ 239,
42
+ 570,
43
+ 255
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "https://xiangbogaobarry.github.io/LangCoop",
50
+ "bbox": [
51
+ 364,
52
+ 258,
53
+ 627,
54
+ 272
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Abstract",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 259,
64
+ 306,
65
+ 336,
66
+ 323
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Multi-agent collaboration holds great promise for enhancing the safety, reliability, and mobility of autonomous driving systems by enabling information sharing among multiple connected agents. However, existing multi-agent communication approaches are hindered by limitations of existing communication media, including high bandwidth demands, agent heterogeneity, and information loss. To address these challenges, we introduce LangCoop, a new paradigm for collaborative autonomous driving that leverages natural language as a compact yet expressive medium for interagent communication. LangCoop features two key innovations: Mixture Model Modular Chain-of-thought $(M^3\\mathrm{CoT})$ for structured zero-shot vision-language reasoning and Natural Language Information Packaging (LangPack) for efficiently packaging information into concise, language-based messages. Through extensive experiments conducted in the CARLA simulations, we demonstrate that LangCoop achieves a remarkable $96\\%$ reduction in communication bandwidth ( $< 2KB$ per message) compared to image-based communication, while maintaining competitive driving performance in the closed-loop evaluation. Our project page and code are at https://xiangbogaobarry.github.io/LangCoop/.",
73
+ "bbox": [
74
+ 109,
75
+ 340,
76
+ 483,
77
+ 704
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "1. Introduction",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 112,
87
+ 734,
88
+ 243,
89
+ 750
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Recent advances in autonomous driving have demonstrated that multi-agent collaboration [30] significantly enhances both safety and efficiency compared to single-vehicle operations, primarily through real-time information sharing and intention communication. This collaborative approach has become increasingly crucial as autonomous vehicles navigate complex environments where interaction with other traffic participants is inevitable and constant. However, the selection of an appropriate collaboration medium remains a critical chal",
96
+ "bbox": [
97
+ 109,
98
+ 760,
99
+ 482,
100
+ 912
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "lenge that has attracted substantial research attention.",
107
+ "bbox": [
108
+ 511,
109
+ 309,
110
+ 864,
111
+ 323
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "A key element of multi-agent collaboration is the medium used for inter-vehicle communication. Researchers have proposed various modalities for exchanging information, including: raw sensor data, neural network features, and downstream task results. Despite their utility, each of these communication media suffers from one or more critical drawbacks. Specifically, they often: (1) Require high bandwidth, placing a heavy load on communication infrastructures and increasing the risk of latency or packet loss. (2) Fail to accommodate the inherent heterogeneities across agents, which may use different sensor configurations, model architectures, or targeting on different downstream tasks. (3) Lose critical contextual information when data are overly compressed, abstracted, or otherwise transformed into a limited representation. (4) Does not support planning-level or control-level collaboration.",
118
+ "bbox": [
119
+ 509,
120
+ 330,
121
+ 883,
122
+ 587
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "To address these issues, we propose that human natural language can serve as an effective communication medium for multi-agent collaborative driving. Unlike conventional sensor-based or feature-based communications, natural language is inherently flexible and capable of conveying a broad range of contextual and semantic cues, therefore offering additional advantages. First, it bridges the gap between machine-readable modalities [4] (e.g., numbers, features, embeddings) and human-spoken language, making the reasoning [25, 58], communication [22], negotiation [7], and decision-making process more transparent. Such transparency benefits research, development, and debugging by enabling human operators to understand and verify the messages being exchanged among autonomous vehicles. Second, ongoing research in leveraging LVLMs within autonomous driving has already demonstrated their utility in understanding [44], reasoning [52], decision-making [40, 56], and even low-level vehicle control [5]. Consequently, natural language collaboration can synergistically exploit the general intel",
129
+ "bbox": [
130
+ 509,
131
+ 595,
132
+ 883,
133
+ 912
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "aside_text",
139
+ "text": "arXiv:2504.13406v2 [cs.RO] 21 Apr 2025",
140
+ "bbox": [
141
+ 22,
142
+ 262,
143
+ 60,
144
+ 708
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "page_number",
150
+ "text": "1",
151
+ "bbox": [
152
+ 493,
153
+ 935,
154
+ 503,
155
+ 946
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "ligence of LVLMs to achieve more robust, versatile, and explainable multi-agent collaboration. Third, natural language enables high-level fusion or negotiation at the planning and prediction levels, allowing agents—including automated vehicles, human drivers, and roadside units—to communicate intention and decision rationale rather than just perception data. This capability simplifies the coordination process, allowing agents to reach mutual understanding and agreements rapidly and clearly, ultimately promoting smoother, safer, and more socially acceptable driving behaviors. Lastly, natural language naturally provides scalability and generalization across diverse scenarios and heterogeneous vehicle platforms. Using standardized language-based communication seamlessly integrates autonomous and human-driven vehicles, regardless of sensor suites or underlying technologies. Moreover, natural language communication is inherently model-agnostic, compatible with both open-source (e.g. LLAMA [17], DeepSeek [18]) and commercial LLMs (e.g. ChatGPT [35], Gemini [43]), enabling easy adoption and interoperability across diverse autonomous vehicle systems.",
162
+ "bbox": [
163
+ 109,
164
+ 90,
165
+ 480,
166
+ 422
167
+ ],
168
+ "page_idx": 1
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "Another compelling rationale emerges from real-world autonomous driving incidents, such as a case where a Waymo driverless car stopped dead inside a construction zone, causing disruptions and creating hazards [42]. Such incidents highlight the fundamental limitation of conventional sensor-based communication: it fails to transparently communicate the vehicle's internal decision-making and reasoning processes to nearby human drivers or traffic controllers. In contrast, an interface that uses natural language as a universal information protocol could explicitly communicate an autonomous vehicle's internal reasoning and intentions in real-time (e.g., \"I've stopped due to unclear construction signage\"), thereby clarifying otherwise confusing behaviors, reducing driver frustration, and facilitating timely human intervention. Furthermore, such a natural language-based approach allows real-time human-in-the-loop interaction, enabling remote operators or nearby traffic managers to quickly communicate or disengage with the vehicle in intuitive terms (e.g., \"Please move slowly to the side\") to promptly resolve ambiguous or problematic situations.",
173
+ "bbox": [
174
+ 109,
175
+ 426,
176
+ 482,
177
+ 758
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "Leveraging these insights, we introduce LangCoop, a novel framework for collaborative autonomous driving that uses natural language as the primary medium for inter-vehicle communication. Our framework consists of three key components: (1) a Mixture Model Modular Chain-of-thought (M3CoT) module that structures reasoning into distinct stages for comprehensive scene understanding; (2) a Natural Language Information Packaging (LangPack) system that compresses rich semantic information into compact messages; and (3)",
184
+ "bbox": [
185
+ 109,
186
+ 761,
187
+ 482,
188
+ 912
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "multiple driving signal generation approaches that translate natural language reasoning into actionable controls. Our experimental results in closed-loop evaluations using the Carla simulator [10] show that, by using zero-shot LVLMs, LangCoop achieves driving scores of up to 48.8 and route completion rates of up to $90.3\\%$ , significantly outperforming non-collaborative baselines while maintaining exceptional communication efficiency $(<2$ KB). The framework also operates effectively with heterogeneous agent capabilities, demonstrating the viability of natural language as a medium for autonomous vehicle collaboration.",
195
+ "bbox": [
196
+ 511,
197
+ 90,
198
+ 883,
199
+ 272
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "2. Related Works",
206
+ "text_level": 1,
207
+ "bbox": [
208
+ 511,
209
+ 287,
210
+ 663,
211
+ 304
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "2.1. LVLMs in Autonomous Driving",
218
+ "text_level": 1,
219
+ "bbox": [
220
+ 511,
221
+ 313,
222
+ 795,
223
+ 330
224
+ ],
225
+ "page_idx": 1
226
+ },
227
+ {
228
+ "type": "text",
229
+ "text": "The integration of Vision-Language Large Models (LVLMs) into autonomous driving has enabled a unified approach to perception, reasoning, and decision-making, offering enhanced interpretability and adaptability [8, 23, 32, 51]. Early studies have explored LVLMs for closed-loop driving, where multimodal sensor data is processed alongside natural language instructions to generate vehicle control outputs. Shao et al. [38] introduced one of the first LVLM-based end-to-end driving models, while Wang et al. [49] focused on translating language instructions into high-level driving commands. Xu et al. [56] and Sima et al. [40] further emphasized explainability, using question-answering and graph-based reasoning to interpret scene dynamics and decision rationales, making autonomous systems more transparent and human-interpretable. Hwang et al. [24] used LVLMs to directly output the future planning waypoints. Xing et al. [51] proposed a comprehensive benchmark for evaluating the truthfulness, safety, fairness, security, and generalizability of LVLMs in the autonomous driving scenes.",
230
+ "bbox": [
231
+ 511,
232
+ 335,
233
+ 883,
234
+ 654
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "text",
240
+ "text": "Beyond perception, LVLMs have demonstrated robustness in out-of-distribution (OOD) scenarios, addressing challenges that conventional deep-learning models struggle with in unseen environments. Wang et al. [48] showed that LVLMs could simulate novel situations through latent space editing, improving generalization. Mei et al. [33] introduced a dual-process framework, combining slow but rigorous reasoning from an LVLM with fast real-time execution from a smaller model, mimicking human cognitive processes. Additionally, Dong et al. [9] and Xing et al. [52] explored zero-shot prompting, demonstrating how LLMs can guide autonomous systems without extensive retraining.",
241
+ "bbox": [
242
+ 511,
243
+ 655,
244
+ 883,
245
+ 866
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "text",
251
+ "text": "LVLMs also play a pivotal role in multi-agent collaboration and human-centric driving by improving vehicular communication [50] and personalized decision",
252
+ "bbox": [
253
+ 511,
254
+ 867,
255
+ 883,
256
+ 912
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "page_number",
262
+ "text": "2",
263
+ "bbox": [
264
+ 493,
265
+ 935,
266
+ 504,
267
+ 946
268
+ ],
269
+ "page_idx": 1
270
+ },
271
+ {
272
+ "type": "text",
273
+ "text": "making [8, 50]. Liang et al. [28] and Zhang et al. [62] explored how generative AI models enable semantic-rich, context-aware inter-vehicle communication, surpassing traditional bandwidth-intensive numeric exchanges. In personalized driving, Li et al. [27] highlighted that LVLMs improve context understanding and human-like reasoning, while Lan et al. [26] and Duan et al. [11] demonstrated their ability to simulate human driving behaviors and dynamically adjust trajectories. As LVLMs continue evolving, their integration into autonomous systems paves the way for more interpretable, adaptable, and collaborative driving solutions that better align with human expectations and real-world challenges.",
274
+ "bbox": [
275
+ 109,
276
+ 90,
277
+ 480,
278
+ 303
279
+ ],
280
+ "page_idx": 2
281
+ },
282
+ {
283
+ "type": "text",
284
+ "text": "2.2. Collaboration Medium in Multi-agent Driving",
285
+ "text_level": 1,
286
+ "bbox": [
287
+ 112,
288
+ 314,
289
+ 480,
290
+ 345
291
+ ],
292
+ "page_idx": 2
293
+ },
294
+ {
295
+ "type": "text",
296
+ "text": "Effective collaboration among autonomous agents in multi-agent driving scenarios hinges on the choice of communication medium. Several approaches have been explored, including the exchange of raw sensor data [1, 3, 14], neural network features [2, 6, 15, 19, 29, 45-47, 53, 55, 60], and perception results [13, 16, 34, 39, 61]. Specifically, raw sensor data (such as LiDAR point clouds or camera images) offers comprehensive environmental perception but demands high communication bandwidth and latency. Meanwhile, neural network features (intermediate embeddings, BEV feature maps, or feature queries) can reduce bandwidth usage yet introduce incompatibility when agents rely on heterogeneous feature extraction networks. Another approach is sharing perception results, such as predicted depth maps [21], object detection outputs [54], occupancy grids [41], or BEV map segmentations [55]. While enumerating all possible perception outputs can strain communication bandwidth, limiting the shared set risks losing critical semantic details.",
297
+ "bbox": [
298
+ 109,
299
+ 352,
300
+ 482,
301
+ 654
302
+ ],
303
+ "page_idx": 2
304
+ },
305
+ {
306
+ "type": "text",
307
+ "text": "Given these challenges, natural language has emerged as a promising alternative for communication in multi-agent driving. Unlike numeric-based representations, natural language is compact, human-interpretable, and adaptable to heterogeneous agents. It also supports planning or control interactions. Recent studies in robotics and autonomous driving have begun to explore language-based communication, leveraging its ability to capture rich contextual information with minimal overhead.. For instance, Hu et al. [20], Yao et al. [57], and Fang et al. [12] use Large Language Models (LLMs) for driving-scenario reasoning on highly abstract traffic descriptions but overlook pedestrians, cyclists, unknown obstacles, and environmental conditions that are pivotal in real-world driving. Another approach, V2V-LLM [4], augments an LLM backbone with pretrained perception features (such as object detections)",
308
+ "bbox": [
309
+ 109,
310
+ 656,
311
+ 482,
312
+ 912
313
+ ],
314
+ "page_idx": 2
315
+ },
316
+ {
317
+ "type": "text",
318
+ "text": "to incorporate environmental cues. However, it does not exploit the vision-based reasoning capabilities of LVLMs. V2X-VLM [59] is the first work to combine perception and reasoning within a LVLM framework, yet it essentially treats multi-agent collaboration as a multi-sensor fusion problem, neglecting important factors like cross-sensor coordination transformations and collaboration at the planning or control level. Moreover, its evaluation remains limited to open-loop benchmarks, and its model is not open-sourced.",
319
+ "bbox": [
320
+ 511,
321
+ 90,
322
+ 883,
323
+ 242
324
+ ],
325
+ "page_idx": 2
326
+ },
327
+ {
328
+ "type": "text",
329
+ "text": "In this work, we advance the field by harnessing both the perception and reasoning capabilities of LVLMs, enabling planning- and control-level collaboration among autonomous vehicular agents. Unlike previous approaches, we conduct closed-loop evaluations to assess real-time performance and provide open-source code for the research community to facilitate further exploration and benchmarking.",
330
+ "bbox": [
331
+ 511,
332
+ 244,
333
+ 883,
334
+ 367
335
+ ],
336
+ "page_idx": 2
337
+ },
338
+ {
339
+ "type": "text",
340
+ "text": "3. Methodology",
341
+ "text_level": 1,
342
+ "bbox": [
343
+ 511,
344
+ 388,
345
+ 648,
346
+ 406
347
+ ],
348
+ "page_idx": 2
349
+ },
350
+ {
351
+ "type": "text",
352
+ "text": "3.1. Framework Overview",
353
+ "text_level": 1,
354
+ "bbox": [
355
+ 511,
356
+ 416,
357
+ 718,
358
+ 431
359
+ ],
360
+ "page_idx": 2
361
+ },
362
+ {
363
+ "type": "text",
364
+ "text": "In this section, we present LangCoop, a novel framework that natively leverages Large Vision Language Models (LVLMs) for collaborative driving among Connected Autonomous Vehicles (CAVs). As illustrated in Fig. 1, our framework establishes a systematic pipeline for information extraction, processing, exchange, and decision-making in collaborative driving scenarios. Each CAV initially captures front-view images through its onboard cameras, which serve as the primary sensory input. These images are passed through our Mixture Model Modular Chain-of-thought $(\\mathrm{M}^{3}\\mathrm{CoT})$ module (detailed in Section 3.2), which systematically extracts environmental and object-level information as well as process goal-oriented information, and behavioral intentions.",
365
+ "bbox": [
366
+ 511,
367
+ 440,
368
+ 883,
369
+ 667
370
+ ],
371
+ "page_idx": 2
372
+ },
373
+ {
374
+ "type": "text",
375
+ "text": "The extracted information is then packaged into a compact, structured natural language format via our Natural Language Information Packaging (LangPack) module. This standardized format facilitates information exchange between connected vehicles while minimizing bandwidth requirements. Concurrently, each vehicle receives packets from other CAVs within the communication range. Upon receiving the packets, each vehicle integrates the messages with its own and feeds them into the LVLMs to generate appropriate driving signals. The driving signals are formulated as discrete trajectories, continuous trajectories, or direct control commands depending on the specific implementation context (detailed in Section 3.4). These signals guide the vehicle's planning and control systems to execute safe and efficient maneuvers.",
376
+ "bbox": [
377
+ 511,
378
+ 670,
379
+ 883,
380
+ 912
381
+ ],
382
+ "page_idx": 2
383
+ },
384
+ {
385
+ "type": "page_number",
386
+ "text": "3",
387
+ "bbox": [
388
+ 493,
389
+ 936,
390
+ 504,
391
+ 946
392
+ ],
393
+ "page_idx": 2
394
+ },
395
+ {
396
+ "type": "image",
397
+ "img_path": "images/19fb98dcc48fbe3eb6993e5d3fdf48004ea5fa1f49b51e61f4ffa788cece5eda.jpg",
398
+ "image_caption": [
399
+ "Figure 1. Overview of the LangCoop framework."
400
+ ],
401
+ "image_footnote": [],
402
+ "bbox": [
403
+ 127,
404
+ 99,
405
+ 869,
406
+ 364
407
+ ],
408
+ "page_idx": 3
409
+ },
410
+ {
411
+ "type": "text",
412
+ "text": "3.2. Mixture Model Modular Chain-of-thought",
413
+ "text_level": 1,
414
+ "bbox": [
415
+ 112,
416
+ 419,
417
+ 475,
418
+ 435
419
+ ],
420
+ "page_idx": 3
421
+ },
422
+ {
423
+ "type": "text",
424
+ "text": "The Mixture Model Modular Chain-of-thought (M $^3$ CoT) module forms the cognitive foundation of our Lang-Coop framework, expanding upon the chain-of-thought reasoning process introduced by OpenEmma [52]. $\\mathrm{M}^3\\mathrm{CoT}$ systematically decomposes the complex task of driving scene understanding into four distinct prompting stages, each addressing a specific aspect of the driving context: driving scene description that focuses on holistic environmental understanding, interactive object description that identifies and characterizes specific objects relevant to the driving task, navigation goal prompting that informs the agent about its next navigational goal's relative location, shifting the agent's perspective from mere trajectory prediction to goal-oriented planning, and finally future intent description that articulates the vehicle's intended actions and decision rationale.",
425
+ "bbox": [
426
+ 111,
427
+ 441,
428
+ 483,
429
+ 698
430
+ ],
431
+ "page_idx": 3
432
+ },
433
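As a sketch of how these four stages might be chained, the snippet below runs them as sequential prompts, carrying earlier answers forward as context. The `ask` wrapper and the stage wording are assumptions for illustration, not the paper's prompts.

```python
# Sequentially chained M3CoT stages (sketch). `ask` is a hypothetical LVLM
# wrapper; the prompt wording is illustrative, not the paper's exact prompts.

STAGES = [
    ("scene", "Describe the driving scene holistically."),
    ("objects", "Identify and characterize objects relevant to driving."),
    ("goal", "The next navigation goal is {goal}; relate it to the ego pose."),
    ("intent", "State the vehicle's intended actions and the rationale."),
]

def ask(model: str, prompt: str, image: str) -> str:
    return f"[{model}] answer to: {prompt[:45]}..."  # placeholder for a real call

def m3cot(image: str, goal: str, models: dict) -> dict:
    results = {}
    for stage, template in STAGES:
        context = " ".join(results.values())  # earlier stages feed later ones
        prompt = (context + " " + template.format(goal=goal)).strip()
        results[stage] = ask(models[stage], prompt, image)
    return results

# Different stages may be served by different models (the key point of Sec. 3.2).
print(m3cot("front.jpg", "32.4 m ahead, 0.15 m right",
            {"scene": "vlm-a", "objects": "vlm-a", "goal": "llm-b", "intent": "llm-b"}))
```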
+ {
434
+ "type": "text",
435
+ "text": "A key innovation in our approach is the flexibility to employ different specialized LVLMs for each prompting stage. This design choice offers several significant advantages: First, it acknowledges that different prompting tasks demand distinct capabilities—driving scene and object description rely predominantly on visual understanding capabilities, while navigation goal interpretation and future intent formulation necessitate stronger logical reasoning skills. By selecting models optimized for these specific competencies, our system potentially outperforms monolithic approaches that use a single model for all tasks. Second, this modular design offers practical benefits related to computational efficiency and cost management. Given that zero-shot",
436
+ "bbox": [
437
+ 111,
438
+ 700,
439
+ 483,
440
+ 912
441
+ ],
442
+ "page_idx": 3
443
+ },
444
+ {
445
+ "type": "text",
446
+ "text": "LVLM inference can be resource-intensive, particularly for high-performance models, our approach allows for strategic resource allocation—deploying more powerful (and potentially more expensive) models only for the stages that critically require their capabilities. This alleviates the need for a single large generalist model, potentially reducing inference time and operational costs without compromising performance.",
447
+ "bbox": [
448
+ 511,
449
+ 420,
450
+ 883,
451
+ 541
452
+ ],
453
+ "page_idx": 3
454
+ },
455
+ {
456
+ "type": "text",
457
+ "text": "3.3. Natural Language Information Packaging",
458
+ "text_level": 1,
459
+ "bbox": [
460
+ 511,
461
+ 555,
462
+ 872,
463
+ 571
464
+ ],
465
+ "page_idx": 3
466
+ },
467
+ {
468
+ "type": "text",
469
+ "text": "Our framework introduces Natural Language Information Packaging (LangPack) as an innovative medium for information sharing. LangPack gathers diverse information sources into a standardized, human-readable, and machine-processable format that balances comprehensiveness with transmission efficiency. Upon completing the $\\mathrm{M}^{3}\\mathrm{CoT}$ processing stages, each vehicle constructs a LangPack packet that integrates prompting results with agent metadata, including location, velocity, acceleration, etc.",
470
+ "bbox": [
471
+ 511,
472
+ 578,
473
+ 883,
474
+ 728
475
+ ],
476
+ "page_idx": 3
477
+ },
478
+ {
479
+ "type": "text",
480
+ "text": "The LangPack approach offers several distinct advantages for collaborative driving systems. First, the inherent compactness of natural language representation allows for information-dense communication with minimal bandwidth requirements—typical LangPack packages require less than 2KB of data, making them suitable for transmission even in bandwidth-constrained V2X communication environments. Furthermore, natural language provides a flexible and extensible medium that can accommodate diverse information types without requiring rigid structural redesigns. This adaptability is particularly valuable for autonomous driving systems",
481
+ "bbox": [
482
+ 511,
483
+ 731,
484
+ 883,
485
+ 912
486
+ ],
487
+ "page_idx": 3
488
+ },
489
+ {
490
+ "type": "page_number",
491
+ "text": "4",
492
+ "bbox": [
493
+ 493,
494
+ 936,
495
+ 504,
496
+ 946
497
+ ],
498
+ "page_idx": 3
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "Table 1. Natural Language Information Packaging Structure.",
503
+ "bbox": [
504
+ 117,
505
+ 89,
506
+ 477,
507
+ 103
508
+ ],
509
+ "page_idx": 4
510
+ },
511
+ {
512
+ "type": "text",
513
+ "text": "Natural Language Information Packaging",
514
+ "text_level": 1,
515
+ "bbox": [
516
+ 127,
517
+ 117,
518
+ 415,
519
+ 132
520
+ ],
521
+ "page_idx": 4
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "Agent Metadata: location, velocity, acceleration, etc.",
526
+ "bbox": [
527
+ 132,
528
+ 137,
529
+ 454,
530
+ 151
531
+ ],
532
+ "page_idx": 4
533
+ },
534
+ {
535
+ "type": "text",
536
+ "text": "Scene Description: The image shows ...",
537
+ "bbox": [
538
+ 132,
539
+ 155,
540
+ 377,
541
+ 169
542
+ ],
543
+ "page_idx": 4
544
+ },
545
+ {
546
+ "type": "text",
547
+ "text": "Objects Description: Vehicle (light-colored car) - Moving forward ...",
548
+ "bbox": [
549
+ 132,
550
+ 172,
551
+ 462,
552
+ 200
553
+ ],
554
+ "page_idx": 4
555
+ },
556
+ {
557
+ "type": "text",
558
+ "text": "Navigation Goal: We need to keep moving ahead ...",
559
+ "bbox": [
560
+ 132,
561
+ 205,
562
+ 444,
563
+ 219
564
+ ],
565
+ "page_idx": 4
566
+ },
567
+ {
568
+ "type": "text",
569
+ "text": "Intent Description: Slight left adjustment while maintaining safe ...",
570
+ "bbox": [
571
+ 132,
572
+ 223,
573
+ 462,
574
+ 251
575
+ ],
576
+ "page_idx": 4
577
+ },
578
+ {
579
+ "type": "text",
580
+ "text": "Total Package Size: $< 2\\mathbf{KB}$",
581
+ "bbox": [
582
+ 132,
583
+ 273,
584
+ 299,
585
+ 287
586
+ ],
587
+ "page_idx": 4
588
+ },
589
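Table 1's packet structure maps naturally onto a few lines of serialization. The sketch below assembles the fields and asserts the ~2 KB budget stated above; the exact wire format is an assumption, since the text only specifies that packets are compact natural language.

```python
# Assembling a LangPack-style packet (sketch; field names follow Table 1,
# the serialization format itself is an assumption).

def build_langpack(meta: dict, sections: dict) -> bytes:
    lines = [f"Agent {meta['id']}, located at: {meta['xy']}, "
             f"current speed: {meta['v']} m/s"]
    for name, text in sections.items():
        lines.append(f"Its {name}:\n{text}")
    return "\n".join(lines).encode("utf-8")

packet = build_langpack(
    {"id": 1, "xy": [2.69048, 69.03092], "v": 4.518},
    {"scene description": "Clear, sunny, multi-lane suburban road.",
     "object description": "Motorcycle mid-distance; black SUV stationary right.",
     "target description": "Target 0.15 m to my right, 32.40 m to my front.",
     "intent description": "Slight right adjustment; slow for the car ahead."},
)
assert len(packet) < 2048, "exceeds the ~2 KB LangPack budget"
print(len(packet), "bytes")
```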
+ {
590
+ "type": "text",
591
+ "text": "that must process heterogeneous and sometimes unexpected environmental elements.",
592
+ "bbox": [
593
+ 111,
594
+ 316,
595
+ 480,
596
+ 345
597
+ ],
598
+ "page_idx": 4
599
+ },
600
+ {
601
+ "type": "text",
602
+ "text": "Upon receiving LangPack packages from other connected vehicles, each CAV performs essential post-processing operations including coordinate transformation and temporal alignment. The processed information is then aggregated with the vehicle's own perceptions and prompting results to create a comprehensive knowledge ready to be passed into the following decision-making module.",
603
+ "bbox": [
604
+ 111,
605
+ 348,
606
+ 480,
607
+ 468
608
+ ],
609
+ "page_idx": 4
610
+ },
611
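The coordinate transformation mentioned here is an ordinary 2D rigid-body change of frame. As a sketch, assuming each packet carries the sender's global pose, a point reported in the sender's frame can be re-expressed in the ego frame as follows:

```python
# 2D rigid transform from a sender's local frame into the ego frame (sketch).
import math

def to_ego_frame(pt, sender_pose, ego_pose):
    """pt = (x, y) in sender frame; poses = (x, y, yaw in radians)."""
    sx, sy, syaw = sender_pose
    ex, ey, eyaw = ego_pose
    # Sender frame -> world frame.
    wx = sx + pt[0] * math.cos(syaw) - pt[1] * math.sin(syaw)
    wy = sy + pt[0] * math.sin(syaw) + pt[1] * math.cos(syaw)
    # World frame -> ego frame.
    dx, dy = wx - ex, wy - ey
    return (dx * math.cos(-eyaw) - dy * math.sin(-eyaw),
            dx * math.sin(-eyaw) + dy * math.cos(-eyaw))

# A point 10 m ahead of the sender ends up 5 m to the right of an ego
# vehicle that sits at (5, 0) facing +y.
print(to_ego_frame((10.0, 0.0), (0.0, 0.0, 0.0), (5.0, 0.0, math.pi / 2)))
```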
+ {
612
+ "type": "text",
613
+ "text": "3.4. Driving Signal Generation",
614
+ "text_level": 1,
615
+ "bbox": [
616
+ 112,
617
+ 479,
618
+ 349,
619
+ 494
620
+ ],
621
+ "page_idx": 4
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "The final component of our LangCoop framework involves translating the aggregated, multi-vehicle understanding into actionable driving signals. We propose three driving signal formulations, each offering specific advantages depending on the implementation context and downstream control requirements:",
626
+ "bbox": [
627
+ 111,
628
+ 502,
629
+ 480,
630
+ 592
631
+ ],
632
+ "page_idx": 4
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "Discrete Trajectory Generation: The LVLM outputs a sequence of waypoints $(x_{i},y_{i})$ for the future $n$ seconds. This high-precision path representation is suitable for complex maneuvers and enables straightforward validation against environmental boundaries.",
637
+ "bbox": [
638
+ 111,
639
+ 593,
640
+ 480,
641
+ 667
642
+ ],
643
+ "page_idx": 4
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "Continuous Trajectory Generation: Rather than discrete positions, this approach defines vehicle motion through speed and turning curvature parameters over time. It produces smoother motion profiles that better align with vehicle dynamics for natural-feeling behavior.",
648
+ "bbox": [
649
+ 111,
650
+ 669,
651
+ 480,
652
+ 758
653
+ ],
654
+ "page_idx": 4
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "Direct Control Signal Generation: In this most direct formulation, the LVLM outputs low-level control signals—specifically steering angle, throttle position, and brake pressure—for each time step. A key advantage of this approach is that outputs can be explicitly constrained within physically feasible ranges (e.g., steering angle limits, maximum acceleration rates), ensuring generated commands never exceed the vehicle's operational capabilities.",
659
+ "bbox": [
660
+ 111,
661
+ 760,
662
+ 480,
663
+ 895
664
+ ],
665
+ "page_idx": 4
666
+ },
667
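The explicit-constraint advantage of the direct control formulation is easy to illustrate: model outputs can be clamped to the vehicle's envelope before execution. The limits below are illustrative placeholders, not the paper's values.

```python
# Constraining LVLM-emitted control commands to feasible ranges (sketch;
# the specific limits are illustrative assumptions).

LIMITS = {"steer": (-1.0, 1.0), "throttle": (0.0, 1.0), "brake": (0.0, 1.0)}

def clamp_controls(raw: dict) -> dict:
    return {k: min(max(raw.get(k, 0.0), lo), hi)
            for k, (lo, hi) in LIMITS.items()}

# An out-of-range model output is mapped back into the vehicle's envelope.
print(clamp_controls({"steer": -1.7, "throttle": 1.2, "brake": -0.1}))
# -> {'steer': -1.0, 'throttle': 1.0, 'brake': 0.0}
```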
+ {
668
+ "type": "text",
669
+ "text": "In Section 4.2, we present a comparative analysis of all",
670
+ "bbox": [
671
+ 112,
672
+ 896,
673
+ 480,
674
+ 911
675
+ ],
676
+ "page_idx": 4
677
+ },
678
+ {
679
+ "type": "text",
680
+ "text": "three driving signal formulations across diverse driving scenarios.",
681
+ "bbox": [
682
+ 511,
683
+ 90,
684
+ 882,
685
+ 119
686
+ ],
687
+ "page_idx": 4
688
+ },
689
+ {
690
+ "type": "text",
691
+ "text": "4. Experiments",
692
+ "text_level": 1,
693
+ "bbox": [
694
+ 513,
695
+ 133,
696
+ 643,
697
+ 148
698
+ ],
699
+ "page_idx": 4
700
+ },
701
+ {
702
+ "type": "text",
703
+ "text": "In this section, we present comprehensive experimental evaluations of our LangCoop framework through closed-loop simulations in the CARLA environment [10]. We first outline our experimental setup and evaluation metrics (§ 4.1), followed by a systematic assessment of key components within our framework, including driving signal formulations (§ 4.2), prompting methods (§ 4.3), communication strategies (§ 4.4), LVLM selection (§ 4.5), and modular design approaches (§ 4.6). We investigate the framework's performance under heterogeneous agent configurations [15, 31] (§ 4.7). Finally, we display some visualization results and analysis in § 4.8.",
704
+ "bbox": [
705
+ 511,
706
+ 157,
707
+ 883,
708
+ 354
709
+ ],
710
+ "page_idx": 4
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "4.1. Experimental Setup",
715
+ "text_level": 1,
716
+ "bbox": [
717
+ 511,
718
+ 362,
719
+ 702,
720
+ 378
721
+ ],
722
+ "page_idx": 4
723
+ },
724
+ {
725
+ "type": "text",
726
+ "text": "In this work we conduct closed-loop evaluations using the CARLA simulation platform. We use 10 testing scenarios in Town05 with each scenario involves two CAVs controlled by our LangCoop framework while interacting with various dynamic actors including other vehicles, pedestrians, and cyclists controlled by CARLA's traffic manager. The two CAVs are initialized at different positions within the same general vicinity. We implement V2V communication with a simulated range of 200 meters. For perception, each vehicle receives frontview RGB camera images at $800 \\times 600$ resolution.",
727
+ "bbox": [
728
+ 511,
729
+ 383,
730
+ 883,
731
+ 549
732
+ ],
733
+ "page_idx": 4
734
+ },
735
+ {
736
+ "type": "text",
737
+ "text": "We employ three primary evaluation metrics to assess performance comprehensively: Driving Score (DS): Calculated as $\\mathrm{DS} = \\mathrm{RC} \\times (1 - \\mathrm{IP})$ , where RC is route completion and IP is infraction penalty. Infractions include collisions, traffic light violations, and lane invasions, each weighted according to severity. Route Completion (RC): The percentage of the predefined route successfully traversed by the vehicle, measured from 0 to 100. Time Consumed (TC): The total time in seconds required to complete the route or until a terminal failure. For communication efficiency assessment, we additionally track: Transmission Bandwidth (TB): The average data size in KB transmitted between vehicles.",
738
+ "bbox": [
739
+ 511,
740
+ 551,
741
+ 882,
742
+ 744
743
+ ],
744
+ "page_idx": 4
745
+ },
746
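The driving score is a direct computation once RC and IP are known. The sketch below applies DS = RC x (1 - IP) with an assumed additive severity weighting for the infraction penalty, since the exact weights are not specified here.

```python
# Driving score DS = RC * (1 - IP) (sketch; the severity weights are
# illustrative assumptions, not the benchmark's exact values).

def driving_score(route_completion: float, infractions: dict) -> float:
    weights = {"collision": 0.30, "red_light": 0.15, "lane_invasion": 0.05}
    penalty = min(1.0, sum(weights[k] * n for k, n in infractions.items()))
    return route_completion * (1.0 - penalty)

# e.g. 90.3% route completion with one collision and one lane invasion:
print(driving_score(90.3, {"collision": 1, "red_light": 0, "lane_invasion": 1}))
```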
+ {
747
+ "type": "text",
748
+ "text": "Unless otherwise specified, our baseline configuration employs GPT-4o-mini [36] as the LVLM, utilizes a concise version of the $\\mathrm{M}^3\\mathrm{CoT}$ module described in Section 3.2, and exchanges both front-view images (compressed JPEG) and LangPack messages between vehicles.",
749
+ "bbox": [
750
+ 511,
751
+ 746,
752
+ 882,
753
+ 835
754
+ ],
755
+ "page_idx": 4
756
+ },
757
+ {
758
+ "type": "text",
759
+ "text": "4.2. Driving Signal Comparison",
760
+ "text_level": 1,
761
+ "bbox": [
762
+ 511,
763
+ 844,
764
+ 759,
765
+ 859
766
+ ],
767
+ "page_idx": 4
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "As described in Section 3.4, our framework supports three distinct driving signal formulations: discrete trajectory, continuous trajectory, and direct control signals.",
772
+ "bbox": [
773
+ 511,
774
+ 867,
775
+ 882,
776
+ 912
777
+ ],
778
+ "page_idx": 4
779
+ },
780
+ {
781
+ "type": "page_number",
782
+ "text": "5",
783
+ "bbox": [
784
+ 493,
785
+ 936,
786
+ 503,
787
+ 946
788
+ ],
789
+ "page_idx": 4
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "We first compare these formulations to identify the most effective approach for subsequent experiments.",
794
+ "bbox": [
795
+ 112,
796
+ 90,
797
+ 485,
798
+ 122
799
+ ],
800
+ "page_idx": 5
801
+ },
802
+ {
803
+ "type": "table",
804
+ "img_path": "images/86e09fc9cb3fb4e2e9b460f446d09d937c5fa4906c7e5349819a445312796558.jpg",
805
+ "table_caption": [
806
+ "Table 2. Performance comparison of different driving signal formulations. The discrete trajectory approach performs poorly while continuous trajectory and direct control signals show much stronger performance."
807
+ ],
808
+ "table_footnote": [],
809
+ "table_body": "<table><tr><td rowspan=\"2\">Driving Signal</td><td colspan=\"2\">Vehicle 1</td><td colspan=\"2\">Vehicle 2</td><td rowspan=\"2\">TC(s)↓</td></tr><tr><td>DS↑</td><td>RC↑</td><td>DS↑</td><td>RC↑</td></tr><tr><td>Discrete Traj.</td><td>5.0</td><td>23.1</td><td>1.3</td><td>19.4</td><td>139.9</td></tr><tr><td>Continuous Traj.</td><td>33.1</td><td>74.9</td><td>48.8</td><td>90.3</td><td>124.6</td></tr><tr><td>Control Signal</td><td>33.7</td><td>89.0</td><td>18.1</td><td>70.2</td><td>124.8</td></tr></table>",
810
+ "bbox": [
811
+ 117,
812
+ 203,
813
+ 478,
814
+ 287
815
+ ],
816
+ "page_idx": 5
817
+ },
818
+ {
819
+ "type": "text",
820
+ "text": "Table 2 reveals that the discrete trajectory approach performs poorly for both vehicles. This underperformance can be attributed to the poor capability of LVLMs towards discrete waypoints understandings—it is hard for zero-shot LVLMs to output discrete waypoints that are smooth and dynamically feasible. In comparison, both continuous trajectory and direct control signal approaches demonstrate better performance. The continuous trajectory formulation achieves better performance for Vehicle 2 (DS: 48.8, RC: 90.3), while the direct control signal approach has better performance for Vehicle 1 (DS: 33.7, RC: 89.0). The continuous trajectory approach also finish the route slightly faster than other methods. We postulate that the strong performance of the continuous trajectory and direct control signal approaches stems from a more natural action space that better aligns with vehicle dynamics and control systems. Based on these results, we adopt the continuous trajectory approach as our default driving signal formulation for subsequent experiments for its balance of performance across both vehicles.",
821
+ "bbox": [
822
+ 111,
823
+ 301,
824
+ 483,
825
+ 619
826
+ ],
827
+ "page_idx": 5
828
+ },
829
+ {
830
+ "type": "text",
831
+ "text": "4.3. Prompting Methods Comparison",
832
+ "text_level": 1,
833
+ "bbox": [
834
+ 112,
835
+ 631,
836
+ 403,
837
+ 648
838
+ ],
839
+ "page_idx": 5
840
+ },
841
+ {
842
+ "type": "text",
843
+ "text": "Next, we evaluate three different prompting strategies to assess the impact of reasoning structure on driving performance: Naive Prompting, which directly asks the LVLM to generate driving signals without structured reasoning, Chain-of-thought (CoT), and Concise CoT. The concise CoT variation is inducing LVLMs to output a more concise description by simply adding \"Please be very concise\" at the end of each prompt.",
844
+ "bbox": [
845
+ 111,
846
+ 654,
847
+ 482,
848
+ 775
849
+ ],
850
+ "page_idx": 5
851
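The three strategies differ only in how the prompt is assembled, as the sketch below shows. Apart from the quoted concise suffix, which the text gives verbatim, the wording is illustrative.

```python
# The three prompting strategies as prompt assembly (sketch; wording other
# than the quoted concise suffix is an illustrative assumption).

BASE = "You are driving a CAV. Given the scene, output a driving signal."
COT_STEPS = ("First describe the scene, then the interactive objects, then "
             "relate the navigation goal, then state your intent. ")

def make_prompt(strategy: str) -> str:
    if strategy == "naive":
        return BASE                          # direct answer, no reasoning
    prompt = COT_STEPS + BASE                # structured chain-of-thought
    if strategy == "cot_concise":
        prompt += " Please be very concise"  # the only change vs. standard CoT
    return prompt

for s in ("naive", "cot", "cot_concise"):
    print(s, "->", make_prompt(s))
```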
+ },
852
+ {
853
+ "type": "text",
854
+ "text": "Table 3 demonstrates that the naive prompting approach performs poorly for both vehicles. This underscores the critical importance of structured reasoning for the autonomous driving task. Both CoT approaches substantially outperform the naive method, where there is no prominent performance priority between standard and concise CoT. The standard CoT approach achieves the highest performance for Vehicle 1 (DS: 37.0, RC: 85.2) and completes navigation in the shortest time",
855
+ "bbox": [
856
+ 111,
857
+ 776,
858
+ 482,
859
+ 912
860
+ ],
861
+ "page_idx": 5
862
+ },
863
+ {
864
+ "type": "table",
865
+ "img_path": "images/50a4d0151be7b208329463eccaee5af0c94d017356cd9b49769d0671a878a538.jpg",
866
+ "table_caption": [
867
+ "Table 3. Performance comparison of different prompting methods. The naive approach performs poorly, while both CoT approaches demonstrate strong performance."
868
+ ],
869
+ "table_footnote": [],
870
+ "table_body": "<table><tr><td rowspan=\"2\">Prompting</td><td colspan=\"2\">Vehicle 1</td><td colspan=\"2\">Vehicle 2</td><td rowspan=\"2\">TC(s)↓</td></tr><tr><td>DS↑</td><td>RC↑</td><td>DS↑</td><td>RC↑</td></tr><tr><td>Naive</td><td>2.7</td><td>23.0</td><td>0.7</td><td>21.1</td><td>248.7</td></tr><tr><td>CoT</td><td>37.0</td><td>85.2</td><td>41.1</td><td>80.3</td><td>105.2</td></tr><tr><td>CoT (concise)</td><td>33.1</td><td>74.9</td><td>48.8</td><td>90.3</td><td>124.6</td></tr></table>",
871
+ "bbox": [
872
+ 524,
873
+ 142,
874
+ 870,
875
+ 226
876
+ ],
877
+ "page_idx": 5
878
+ },
879
+ {
880
+ "type": "text",
881
+ "text": "(105.2 seconds). Meanwhile, the concise CoT variation achieves the best performance for Vehicle 2 (DS: 48.8, RC: 90.3). The performance differences between standard and concise CoT prompting highlight an interesting tradeoff. The standard CoT provides more comprehensive reasoning, potentially allowing for more nuanced decision-making, while the concise version reduces computational overhead and may focus the model on the most critical aspects of the driving task. For subsequent experiments, we adopt the concise CoT method as our default prompting strategy, as it provides strong overall performance while maintaining computational efficiency.",
882
+ "bbox": [
883
+ 511,
884
+ 250,
885
+ 883,
886
+ 446
887
+ ],
888
+ "page_idx": 5
889
+ },
890
+ {
891
+ "type": "text",
892
+ "text": "4.4. Communicative Message Comparison",
893
+ "text_level": 1,
894
+ "bbox": [
895
+ 511,
896
+ 457,
897
+ 841,
898
+ 474
899
+ ],
900
+ "page_idx": 5
901
+ },
902
+ {
903
+ "type": "text",
904
+ "text": "A central aspect of our collaborative driving approach is the mechanism and content of inter-vehicle communication. We compare four different communication strategies: no collaboration (baseline), image-only sharing, LangPack-only sharing, and combined image+LangPack sharing.",
905
+ "bbox": [
906
+ 511,
907
+ 479,
908
+ 883,
909
+ 570
910
+ ],
911
+ "page_idx": 5
912
+ },
913
+ {
914
+ "type": "table",
915
+ "img_path": "images/b2555493d11a88f9f902d0c9d39d8c9ce761a9c952854418ed7ecf42bf082af6.jpg",
916
+ "table_caption": [
917
+ "Table 4. Performance comparison of different communication strategies. LangPack provides substantial performance gains with minimal bandwidth usage, while the combined approach achieves the highest overall performance."
918
+ ],
919
+ "table_footnote": [],
920
+ "table_body": "<table><tr><td rowspan=\"2\">Message</td><td colspan=\"2\">Vehicle 1</td><td colspan=\"2\">Vehicle 2</td><td rowspan=\"2\">TC(s)↓</td><td rowspan=\"2\">TB(KB)↓</td></tr><tr><td>DS↑</td><td>RC↑</td><td>DS↑</td><td>RC↑</td></tr><tr><td>Non-collab</td><td>13.5</td><td>33.1</td><td>11.35</td><td>29.44</td><td>200.1</td><td>0</td></tr><tr><td>Image (JPEG)</td><td>15.3</td><td>38.9</td><td>31.3</td><td>60.7</td><td>65.8</td><td>43.1</td></tr><tr><td>LangPack</td><td>35.1</td><td>71.6</td><td>42.8</td><td>80.1</td><td>114.6</td><td>1.8</td></tr><tr><td>Image+LangPack</td><td>33.1</td><td>74.9</td><td>48.8</td><td>90.3</td><td>124.6</td><td>44.9</td></tr></table>",
921
+ "bbox": [
922
+ 514,
923
+ 651,
924
+ 880,
925
+ 747
926
+ ],
927
+ "page_idx": 5
928
+ },
929
+ {
930
+ "type": "text",
931
+ "text": "As shown in Table 4, the non-collaborative baseline performs poorly with driving scores, which affirms the importance of multi-vehicular collaboration. The image-only strategy shows modest improvements over the non-collaborative baseline but falls significantly short of the LangPack-based methods. This suggests that raw visual data, while information-rich, may not be optimally structured for inter-vehicle understanding without additional processing. The LangPack-only approach achieves remarkable performance (Vehicle 1: DS",
932
+ "bbox": [
933
+ 511,
934
+ 761,
935
+ 883,
936
+ 912
937
+ ],
938
+ "page_idx": 5
939
+ },
940
+ {
941
+ "type": "page_number",
942
+ "text": "6",
943
+ "bbox": [
944
+ 493,
945
+ 936,
946
+ 504,
947
+ 946
948
+ ],
949
+ "page_idx": 5
950
+ },
951
+ {
952
+ "type": "text",
953
+ "text": "35.1, RC 71.6; Vehicle 2: DS 42.8, RC 80.1) while requiring minimal bandwidth (1.8 KB), demonstrating the exceptional efficiency of our natural language packaging approach. This represents a bandwidth reduction of over $96\\%$ compared to image sharing while delivering substantially better performance, The combined Image+LangPack approach achieves the highest overall performance, particularly for Vehicle 2 (DS: 48.8, RC: 90.3), but has highest bandwidth consumption (44.9 KB).",
954
+ "bbox": [
955
+ 109,
956
+ 90,
957
+ 480,
958
+ 241
959
+ ],
960
+ "page_idx": 6
961
+ },
962
+ {
963
+ "type": "text",
964
+ "text": "These results demonstrate that LangPack offers an exceptional balance between performance and communication efficiency, highlighting the information density and semantic richness of structured natural language representations. For bandwidth-constrained applications, LangPack-only communication provides nearoptimal performance with minimal data requirements. When bandwidth constraints are less severe, the combined approach offers incremental performance improvements at the cost of substantially higher data transmission.",
965
+ "bbox": [
966
+ 109,
967
+ 242,
968
+ 482,
969
+ 407
970
+ ],
971
+ "page_idx": 6
972
+ },
973
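The TB column of Table 4 follows directly from what each strategy puts on the wire per exchange, as this small tally shows (sizes taken from the table):

```python
# Per-exchange payload tally for the four strategies, using sizes (KB)
# consistent with Table 4's TB column.
strategies = {
    "non-collab": [],                # nothing transmitted
    "image (JPEG)": [43.1],          # compressed front view
    "LangPack": [1.8],               # structured text packet
    "image+LangPack": [43.1, 1.8],   # both payloads per exchange
}
for name, parts in strategies.items():
    print(f"{name}: TB = {sum(parts):.1f} KB")
```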
+ {
974
+ "type": "text",
975
+ "text": "4.5. LVLM Performance Comparison",
976
+ "text_level": 1,
977
+ "bbox": [
978
+ 112,
979
+ 417,
980
+ 405,
981
+ 434
982
+ ],
983
+ "page_idx": 6
984
+ },
985
+ {
986
+ "type": "text",
987
+ "text": "The choice of LVLM significantly impacts collaborative driving performance. We evaluate six popular vision-language models (GPT-4o, Claude-3.7 Sonnet, GPT4o-mini, Gemini Flash Lite 2.0, Qwen-2.5-VL-7B, and Llama 3.2 11B Vision Instruct) to determine their effectiveness within our framework. In the following, we refer these models as GPT-4o, Claude-3.7, GPT4o-mini, Gemini-2.0, Qwen-2.5, and Llama-3.2 respectively.",
988
+ "bbox": [
989
+ 109,
990
+ 440,
991
+ 482,
992
+ 561
993
+ ],
994
+ "page_idx": 6
995
+ },
996
+ {
997
+ "type": "table",
998
+ "img_path": "images/fad168078fd41822295acfac7c5bb5bc954d632134c2d799acfda03b05b17763.jpg",
999
+ "table_caption": [
1000
+ "Table 5. Performance comparison of different LVLMs. The top-tier commercial models (GPT-4o, Claude-3.7) demonstrate the strongest performance, with GPT-4o-mini offering competitive capabilities at lower computational cost."
1001
+ ],
1002
+ "table_footnote": [],
1003
+ "table_body": "<table><tr><td rowspan=\"2\">Model</td><td colspan=\"2\">Vehicle 1</td><td colspan=\"2\">Vehicle 2</td><td rowspan=\"2\">TC(s)↓</td></tr><tr><td>DS↑</td><td>RC↑</td><td>DS↑</td><td>RC↑</td></tr><tr><td>GPT-4o</td><td>41.3</td><td>70.0</td><td>47.7</td><td>91.0</td><td>79.0</td></tr><tr><td>Claude-3.7</td><td>32.0</td><td>67.0</td><td>72.1</td><td>94.1</td><td>88.5</td></tr><tr><td>GPT-4o-mini</td><td>33.1</td><td>74.9</td><td>48.8</td><td>90.3</td><td>124.6</td></tr><tr><td>Gemini-2.0</td><td>12.1</td><td>33.7</td><td>25.6</td><td>58.0</td><td>46.5</td></tr><tr><td>Qwen-2.5</td><td>15.5</td><td>32.2</td><td>19.4</td><td>28.8</td><td>70.7</td></tr><tr><td>Llama-3.2</td><td>11.6</td><td>31.1</td><td>19.0</td><td>42.2</td><td>102.5</td></tr></table>",
1004
+ "bbox": [
1005
+ 125,
1006
+ 641,
1007
+ 467,
1008
+ 760
1009
+ ],
1010
+ "page_idx": 6
1011
+ },
1012
+ {
1013
+ "type": "text",
1014
+ "text": "Table 5 shows that GPT-4o, Claude-3.7, and GPT-4o-mini consistently outperform other options across both vehicles, suggesting these models possess superior capabilities for understanding complex driving scenes and generating appropriate driving actions in collaborative contexts. The remaining models Gemini-2.0, Qwen-2.5, and Llama-3.2 demonstrate lower performance. Interestingly, Gemini-2.0 completes routes in the shortest time (46.5 seconds), suggesting more aggressive driving",
1015
+ "bbox": [
1016
+ 109,
1017
+ 775,
1018
+ 482,
1019
+ 912
1020
+ ],
1021
+ "page_idx": 6
1022
+ },
1023
+ {
1024
+ "type": "text",
1025
+ "text": "behavior that may prioritize speed over safety or adherence to traffic rules.",
1026
+ "bbox": [
1027
+ 511,
1028
+ 90,
1029
+ 880,
1030
+ 119
1031
+ ],
1032
+ "page_idx": 6
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "text": "4.6. Mixture Model Modular Design",
1037
+ "text_level": 1,
1038
+ "bbox": [
1039
+ 511,
1040
+ 131,
1041
+ 795,
1042
+ 148
1043
+ ],
1044
+ "page_idx": 6
1045
+ },
1046
+ {
1047
+ "type": "text",
1048
+ "text": "Our $\\mathrm{M}^{3}\\mathrm{CoT}$ architecture enables the use of different specialized LVLMs for distinct reasoning stages. To evaluate the potential benefits of this modular approach, we implement two experimental configurations with varying model assignments for each prompting stage. In Experiment 6.A, we use Gemini-2.0 for driving scene and interactive objects description, Llama-3.2 for navigation goal and feature intent description, and use GPT4o-mini for driving signal generation. In Experiment 6.B, we use Qwen-2.5 for driving scene and interactive objects description, Llama-3.2 for navigation goal and feature intent description, and use GPT4o-mini for driving signal generation.",
1049
+ "bbox": [
1050
+ 509,
1051
+ 152,
1052
+ 883,
1053
+ 349
1054
+ ],
1055
+ "page_idx": 6
1056
+ },
1057
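The two configurations are simply stage-to-model assignments. Dispatching each M3CoT stage through a map like the one below is a sketch of the mixture design, not the paper's code.

```python
# The two modular configurations of Table 6 as stage-to-model maps (sketch).

EXPERIMENTS = {
    "6.A": {"scene": "Gemini-2.0", "objects": "Gemini-2.0",
            "goal": "Llama-3.2", "intent": "Llama-3.2",
            "signal": "GPT-4o-mini"},
    "6.B": {"scene": "Qwen-2.5", "objects": "Qwen-2.5",
            "goal": "Llama-3.2", "intent": "Llama-3.2",
            "signal": "GPT-4o-mini"},
}

def model_for(exp: str, stage: str) -> str:
    # Each reasoning stage is routed to its assigned (cheaper or stronger) model.
    return EXPERIMENTS[exp][stage]

print(model_for("6.A", "scene"))   # -> Gemini-2.0
```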
+ {
1058
+ "type": "table",
1059
+ "img_path": "images/b45e9de02291555a81fe5a247438894b592655fe1aad2aa244bc301f09bcaec8.jpg",
1060
+ "table_caption": [
1061
+ "Table 6. Performance comparison of different Mixture Model Modular (M $^3$ CoT) configurations."
1062
+ ],
1063
+ "table_footnote": [],
1064
+ "table_body": "<table><tr><td rowspan=\"2\">M3CoT Setup</td><td colspan=\"2\">Vehicle 1</td><td colspan=\"2\">Vehicle 2</td><td rowspan=\"2\">TC(s)↓</td></tr><tr><td>DS↑</td><td>RC↑</td><td>DS↑</td><td>RC↑</td></tr><tr><td>GPT4o-mini</td><td>33.1</td><td>74.9</td><td>48.8</td><td>90.3</td><td>124.6</td></tr><tr><td>Exp 6.A</td><td>31.4</td><td>67.9</td><td>37.2</td><td>71.3</td><td>144.6</td></tr><tr><td>Exp 6.B</td><td>35.2</td><td>68.5</td><td>42.1</td><td>82.6</td><td>119.3</td></tr></table>",
1065
+ "bbox": [
1066
+ 522,
1067
+ 402,
1068
+ 870,
1069
+ 486
1070
+ ],
1071
+ "page_idx": 6
1072
+ },
1073
+ {
1074
+ "type": "text",
1075
+ "text": "From Table 6, in experiments 6.A and 6.B, we observe that replacing the reasoning modules with LVLMs other than GPT4o-mini results in slightly lower but still competitive performance compared to the pure GPT4o-mini model. Given that the API costs of Gemini-2.0 and Llama-3.2 are lower than that of GPT4o-mini, these experimental results suggest that in practical scenarios with limited computational budgets, our Mixture Model Modular Chain-of-thought module supports the possibility of replacing reasoning modules with a mixture of models.",
1076
+ "bbox": [
1077
+ 509,
1078
+ 502,
1079
+ 883,
1080
+ 667
1081
+ ],
1082
+ "page_idx": 6
1083
+ },
1084
+ {
1085
+ "type": "text",
1086
+ "text": "4.7. Heterogeneous Agents Evaluation",
1087
+ "text_level": 1,
1088
+ "bbox": [
1089
+ 511,
1090
+ 678,
1091
+ 808,
1092
+ 694
1093
+ ],
1094
+ "page_idx": 6
1095
+ },
1096
+ {
1097
+ "type": "text",
1098
+ "text": "In real-world deployments, collaborative driving systems will likely operate in environments where different vehicles utilize AI models with varying capabilities. To assess our framework's effectiveness in such heterogeneous settings, we conduct two experiments with vehicle pairs using different LVLMs. In experiment 7.A, the vehicles are equipped with GPT-4o-mini and Gemini-2.0, while in experiment 7.B, they are equipped with GPT-4o-mini and Llama-3.2.",
1099
+ "bbox": [
1100
+ 509,
1101
+ 700,
1102
+ 883,
1103
+ 834
1104
+ ],
1105
+ "page_idx": 6
1106
+ },
1107
+ {
1108
+ "type": "text",
1109
+ "text": "As shown in Table 7, collaboration improves both driving scores and route completion rates across both experiments. In experiment 7.A, pairing GPT-4o-mini with Gemini-2.0, and in experiment 7.B, pairing GPT-4o-mini with Llama-3.2, both vehicles benefit from the",
1110
+ "bbox": [
1111
+ 511,
1112
+ 835,
1113
+ 883,
1114
+ 910
1115
+ ],
1116
+ "page_idx": 6
1117
+ },
1118
+ {
1119
+ "type": "page_number",
1120
+ "text": "7",
1121
+ "bbox": [
1122
+ 493,
1123
+ 935,
1124
+ 504,
1125
+ 946
1126
+ ],
1127
+ "page_idx": 6
1128
+ },
1129
+ {
1130
+ "type": "image",
1131
+ "img_path": "images/9e9a0a6057fe7dfa281afc5c7aa13aa62df16bb82fa911dd3ffcb7b0b3afccbb.jpg",
1132
+ "image_caption": [
1133
+ "Figure 2. Visualization of a natural-language-based collaborative driving scenario. CAV 2 slows down upon receiving the 'slow down' intent description from CAV 1. The context is slightly paraphrased for better visualization."
1134
+ ],
1135
+ "image_footnote": [],
1136
+ "bbox": [
1137
+ 125,
1138
+ 89,
1139
+ 519,
1140
+ 354
1141
+ ],
1142
+ "page_idx": 7
1143
+ },
1144
+ {
1145
+ "type": "image",
1146
+ "img_path": "images/4db12cd0df73c24a8babd9e59327a42b696856ebaa0f7ba25afadbe4151f45b7.jpg",
1147
+ "image_caption": [],
1148
+ "image_footnote": [],
1149
+ "bbox": [
1150
+ 527,
1151
+ 90,
1152
+ 875,
1153
+ 354
1154
+ ],
1155
+ "page_idx": 7
1156
+ },
1157
+ {
1158
+ "type": "table",
1159
+ "img_path": "images/a69af74c30d05b372e27f947c7051caeb43ea106d9d8cdd0a9c30083f7f1eacc.jpg",
1160
+ "table_caption": [
1161
+ "Table 7. Performance in heterogeneous agent configurations where vehicles use different LVLMs."
1162
+ ],
1163
+ "table_footnote": [],
1164
+ "table_body": "<table><tr><td></td><td colspan=\"2\">Heterogeneous Setup</td><td>DS↑</td><td>RC↑</td><td>TC(s)↓</td></tr><tr><td rowspan=\"4\">Exp 7.A</td><td rowspan=\"2\">Non-collab</td><td>GPT-4o-mini</td><td>18.2</td><td>56.1</td><td>167.3</td></tr><tr><td>Gemini-2.0</td><td>12.6</td><td>61.1</td><td>167.3</td></tr><tr><td rowspan=\"2\">Image+LangPack</td><td>GPT-4o-mini</td><td>59.1</td><td>73.2</td><td>126.8</td></tr><tr><td>Gemini-2.0</td><td>45.3</td><td>70.2</td><td>126.8</td></tr><tr><td rowspan=\"4\">Exp 7.B</td><td rowspan=\"2\">Non-collab</td><td>GPT-4o-mini</td><td>16.7</td><td>70.2</td><td>142.0</td></tr><tr><td>Llama-3.2</td><td>11.5</td><td>51.0</td><td>142.0</td></tr><tr><td rowspan=\"2\">Image+LangPack</td><td>GPT-4o-mini</td><td>51.9</td><td>96.1</td><td>144.5</td></tr><tr><td>Llama-3.2</td><td>12.6</td><td>40.1</td><td>144.5</td></tr></table>",
1165
+ "bbox": [
1166
+ 114,
1167
+ 459,
1168
+ 496,
1169
+ 588
1170
+ ],
1171
+ "page_idx": 7
1172
+ },
1173
+ {
1174
+ "type": "text",
1175
+ "text": "collaborative setup. This demonstrates that our framework is adaptable not only to homogeneous settings but also to heterogeneous environments.",
1176
+ "bbox": [
1177
+ 111,
1178
+ 616,
1179
+ 483,
1180
+ 662
1181
+ ],
1182
+ "page_idx": 7
1183
+ },
1184
+ {
1185
+ "type": "text",
1186
+ "text": "4.8. Visualization",
1187
+ "text_level": 1,
1188
+ "bbox": [
1189
+ 112,
1190
+ 676,
1191
+ 250,
1192
+ 691
1193
+ ],
1194
+ "page_idx": 7
1195
+ },
1196
+ {
1197
+ "type": "text",
1198
+ "text": "Figure 2 displays a scenario where a leading CAV approaches an intersection and decides to slow down. After sharing its intent 'slow down' with other CAVs, the following vehicle also decides to slow down despite originally intending to continue forward. This demonstrates effective collaborative decision-making, as the follower vehicle appropriately adjusts its behavior based on the other CAV's communicated intent. The example illustrates how language-based communication enables real-time adaptive driving behaviors, enhancing overall traffic safety through multi-agent decision-level collaboration. Furthermore, this interaction highlights the practical value of our framework in translating natural language intents into concrete driving decisions",
1199
+ "bbox": [
1200
+ 111,
1201
+ 700,
1202
+ 483,
1203
+ 912
1204
+ ],
1205
+ "page_idx": 7
1206
+ },
1207
+ {
1208
+ "type": "text",
1209
+ "text": "across multiple autonomous vehicles. For more visualization results, please refer to our anonymous project page https://xiangbogaobarry.github.io/LangCoop/.",
1210
+ "bbox": [
1211
+ 511,
1212
+ 422,
1213
+ 883,
1214
+ 469
1215
+ ],
1216
+ "page_idx": 7
1217
+ },
1218
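A minimal sketch of the decision-level behavior in Figure 2: the follower caps its target speed when a received intent mentions slowing down. In LangCoop this inference is made by the LVLM from the full packet; the keyword trigger below is a deliberate simplification.

```python
# Follower reaction to a received 'slow down' intent (sketch; a keyword
# check stands in for the LVLM's reading of the packet).

def adjust_plan(own_speed: float, received_intents: list[str]) -> float:
    if any("slow down" in intent.lower() for intent in received_intents):
        return own_speed * 0.5   # back off behind the decelerating leader
    return own_speed

print(adjust_plan(5.6, ["CAV 1 intent: slow down before the intersection."]))
```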
+ {
1219
+ "type": "text",
1220
+ "text": "5. Discussion, Limitations, and Future Work",
1221
+ "text_level": 1,
1222
+ "bbox": [
1223
+ 511,
1224
+ 484,
1225
+ 883,
1226
+ 518
1227
+ ],
1228
+ "page_idx": 7
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "Our experiments with LangCoop reveal several key insights that inform future research directions:",
1233
+ "bbox": [
1234
+ 511,
1235
+ 530,
1236
+ 883,
1237
+ 560
1238
+ ],
1239
+ "page_idx": 7
1240
+ },
1241
+ {
1242
+ "type": "text",
1243
+ "text": "Advantage of Zero-shot LVLMs. Despite benefits of domain-specific training for LVLMs, zero-shot approaches offer clear advantages. They eliminate costly dataset collection and training while maintaining adaptability across diverse driving scenarios. Additionally, proprietary models like GPT and Gemini series cannot be fine-tuned by third parties. A zero-shot pipeline that leverages all LVLMs without domain-specific finetuning provides flexibility and accessibility for resource-limited institute.",
1244
+ "bbox": [
1245
+ 511,
1246
+ 561,
1247
+ 883,
1248
+ 712
1249
+ ],
1250
+ "page_idx": 7
1251
+ },
1252
+ {
1253
+ "type": "text",
1254
+ "text": "Computational and Latency Concerns. Regarding computational concerns, we note that LVLM efficiency is rapidly improving, and large models can generate trajectories for training more compact deployment models. Some novel dual-system designs[33, 44] may also alleviate the computational intensity. The conceptual advantages of language-based collaboration outweigh current computational demands, opening new possibilities for interpretable, efficient, and adaptable multi-agent driving systems.",
1255
+ "bbox": [
1256
+ 511,
1257
+ 713,
1258
+ 883,
1259
+ 864
1260
+ ],
1261
+ "page_idx": 7
1262
+ },
1263
+ {
1264
+ "type": "text",
1265
+ "text": "Prompting Strategies for Driving. We observed significant sensitivity to prompt formulation in driving contexts. For example, we observed that explicitly in-",
1266
+ "bbox": [
1267
+ 511,
1268
+ 866,
1269
+ 883,
1270
+ 912
1271
+ ],
1272
+ "page_idx": 7
1273
+ },
1274
+ {
1275
+ "type": "page_number",
1276
+ "text": "8",
1277
+ "bbox": [
1278
+ 493,
1279
+ 935,
1280
+ 503,
1281
+ 946
1282
+ ],
1283
+ "page_idx": 7
1284
+ },
1285
+ {
1286
+ "type": "text",
1287
+ "text": "structuring the model to \"avoid collisions\" (which might seem obvious in driving) substantially improved performance. This suggests that current LVLMs may not fully internalize driving-specific common knowledge. This indicates potential for improvement through specialized prompts or fine-tuning approaches focused on autonomous driving scenarios.",
1288
+ "bbox": [
1289
+ 111,
1290
+ 90,
1291
+ 480,
1292
+ 196
1293
+ ],
1294
+ "page_idx": 8
1295
+ },
1296
+ {
1297
+ "type": "text",
1298
+ "text": "Physical-Informed Control Integration. Our current implementation does not fully incorporate detailed vehicle dynamics into the planning pipeline. Future extensions could address this by integrating physical vehicle models (e.g., bicycle model). Using techniques like quintic polynomial trajectory planning could ensure physically realizable motion while preserving the high-level reasoning capabilities of language models.",
1299
+ "bbox": [
1300
+ 111,
1301
+ 196,
1302
+ 480,
1303
+ 316
1304
+ ],
1305
+ "page_idx": 8
1306
+ },
1307
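A kinematic bicycle model of the kind proposed here takes only a few lines; the sketch below rolls one out with clamped steering. Wheelbase, limits, and step size are illustrative values.

```python
# Kinematic bicycle-model rollout with clamped steering (sketch; all
# parameter values are illustrative assumptions).
import math

def bicycle_step(x, y, yaw, v, steer, accel, dt=0.1, wheelbase=2.8):
    steer = max(-0.6, min(0.6, steer))       # keep steering physically feasible
    x += v * math.cos(yaw) * dt
    y += v * math.sin(yaw) * dt
    yaw += v / wheelbase * math.tan(steer) * dt
    v = max(0.0, v + accel * dt)
    return x, y, yaw, v

state = (0.0, 0.0, 0.0, 5.0)
for _ in range(10):                          # roll the model out for 1 s
    state = bicycle_step(*state, steer=0.1, accel=0.5)
print(state)
```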
+ {
1308
+ "type": "text",
1309
+ "text": "Expanding V2X Communication Paradigms. While we currently focus on vehicle-to-vehicle communication, the approach naturally extends to broader V2X ecosystems [37]. The unstructured nature of messages like Emergency Vehicle Alerts, Signal Phase and Timing, and Roadside Safety Alerts[63] aligns well with natural language representations. Future research could incorporate these additional message types, leveraging language models' ability to process diverse information streams within a unified framework.",
1310
+ "bbox": [
1311
+ 111,
1312
+ 316,
1313
+ 480,
1314
+ 467
1315
+ ],
1316
+ "page_idx": 8
1317
+ },
1318
+ {
1319
+ "type": "text",
1320
+ "text": "6. Conclusion",
1321
+ "text_level": 1,
1322
+ "bbox": [
1323
+ 112,
1324
+ 481,
1325
+ 230,
1326
+ 496
1327
+ ],
1328
+ "page_idx": 8
1329
+ },
1330
+ {
1331
+ "type": "text",
1332
+ "text": "This work introduces LangCoop, a novel framework that leverages natural language as a communication medium for multi-agent collaborative driving. We introduce Mixture Model Modular Chain-of-thought (M3CoT) for reasoning and the Natural Language Information Packaging (LangPack) for efficient data sharing. Extensive closed-loop experiments in simulation environments demonstrate that language-based collaboration not only reduces bandwidth requirements but also enhances driving performance and interpretability by including decision-level communication. Looking forward, further optimization of prompting strategies and deeper integration of vehicle dynamics promise to extend the capabilities of language-driven autonomous systems, marking a significant step toward safer and more efficient collaborative driving.",
1333
+ "bbox": [
1334
+ 111,
1335
+ 506,
1336
+ 480,
1337
+ 748
1338
+ ],
1339
+ "page_idx": 8
1340
+ },
1341
+ {
1342
+ "type": "text",
1343
+ "text": "References",
1344
+ "text_level": 1,
1345
+ "bbox": [
1346
+ 114,
1347
+ 761,
1348
+ 209,
1349
+ 776
1350
+ ],
1351
+ "page_idx": 8
1352
+ },
1353
+ {
1354
+ "type": "list",
1355
+ "sub_type": "ref_text",
1356
+ "list_items": [
1357
+ "[1] Eduardo Arnold, Mehrdad Dianati, Robert de Temple, and Saber Fallah. Cooperative perception for 3d object detection in driving scenarios using infrastructure sensors. IEEE Transactions on Intelligent Transportation Systems, 23(3):1852-1864, 2020. 3",
1358
+ "[2] Zhengwei Bai, Guoyuan Wu, Matthew J Barth, Yongkang Liu, Emrah Akin Sisbot, and Kentaro Oguchi. Pillargrid: Deep learning-based cooperative perception for 3d object detection from onboard-roadside lidar. In"
1359
+ ],
1360
+ "bbox": [
1361
+ 120,
1362
+ 786,
1363
+ 480,
1364
+ 911
1365
+ ],
1366
+ "page_idx": 8
1367
+ },
1368
+ {
1369
+ "type": "list",
1370
+ "sub_type": "ref_text",
1371
+ "list_items": [
1372
+ "2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC), pages 1743-1749. IEEE, 2022. 3",
1373
+ "[3] Qi Chen, Sihai Tang, Qing Yang, and Song Fu. Cooper: Cooperative perception for connected autonomous vehicles based on 3d point clouds. In 2019 IEEE 39th International Conference on Distributed Computing Systems (ICDCS), pages 514-524. IEEE, 2019. 3",
1374
+ "[4] Hsu-kuang Chiu, Ryo Hachiuma, Chien-Yi Wang, Stephen F Smith, Yu-Chiang Frank Wang, and MinHung Chen. V2v-llm: Vehicle-to-vehicle cooperative autonomous driving with multi-modal large language models. arXiv preprint arXiv:2502.09980, 2025.1,3",
1375
+ "[5] Can Cui, Zichong Yang, Yupeng Zhou, Juntong Peng, Sung-Yeon Park, Cong Zhang, Yunsheng Ma, Xu Cao, Wenqian Ye, Yiheng Feng, et al. On-board vision-language models for personalized autonomous vehicle motion control: System design and real-world validation. arXiv preprint arXiv:2411.11913, 2024. 1",
1376
+ "[6] Jiaxun Cui, Hang Qiu, Dian Chen, Peter Stone, and Yuke Zhu. Coopernaut: End-to-end driving with cooperative perception for networked vehicles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17252-17262, 2022. 3",
1377
+ "[7] Jiaxun Cui, Chen Tang, Jarrett Holtz, Janice Nguyen, Alessandro G Allievi, Hang Qiu, and Peter Stone. Talking vehicles: Cooperative driving via natural language, 2025. 1",
1378
+ "[8] Longchao Da, Tiejin Chen, Zhuoheng Li, Shreyas Bachi-ruj, Huaiyuan Yao, Xiyang Hu, Zhengzhong Tu, Yue Zhao, Dongjie Wang, Ram Pendyala, et al. Generative ai in transportation planning: A survey. arXiv preprint arXiv:2503.07158, 2025. 2, 3",
1379
+ "[9] Zeyu Dong, Yimin Zhu, Yansong Li, Kevin Mahon, and Yu Sun. Generalizing end-to-end autonomous driving in real-world environments using zero-shot llms. arXiv preprint arXiv:2411.14256, 2024. 2",
1380
+ "[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on robot learning, pages 1-16. PMLR, 2017. 2, 5",
1381
+ "[11] Yiqun Duan, Qiang Zhang, and Renjing Xu. Prompting multi-modal tokens to enhance end-to-end autonomous driving imitation learning with llms. arXiv preprint arXiv:2404.04869, 2024.3",
1382
+ "[12] Shiyu Fang, Jiaqi Liu, Mingyu Ding, Yiming Cui, Chen Lv, Peng Hang, and Jian Sun. Towards interactive and learnable cooperative driving automation: a large language model-driven decision-making framework. arXiv preprint arXiv:2409.12812, 2024.3",
1383
+ "[13] Chen Fu, Chiyu Dong, Christoph Mertz, and John M Dolan. Depth completion via inductive fusion of planar lidar and monocular camera. In 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 10843-10848. IEEE, 2020. 3",
1384
+ "[14] Hongbo Gao, Bo Cheng, Jianqiang Wang, Keqiang Li, Jianhui Zhao, and Deyi Li. Object classification using cnn-based fusion of vision and lidar in autonomous vehi"
1385
+ ],
1386
+ "bbox": [
1387
+ 514,
1388
+ 92,
1389
+ 883,
1390
+ 911
1391
+ ],
1392
+ "page_idx": 8
1393
+ },
1394
+ {
1395
+ "type": "page_number",
1396
+ "text": "9",
1397
+ "bbox": [
1398
+ 493,
1399
+ 935,
1400
+ 504,
1401
+ 946
1402
+ ],
1403
+ "page_idx": 8
1404
+ },
1405
+ {
1406
+ "type": "list",
1407
+ "sub_type": "ref_text",
1408
+ "list_items": [
1409
+ "cle environment. IEEE Transactions on Industrial Informatics, 14(9):4224-4231, 2018. 3",
1410
+ "[15] Xiangbo Gao, Runsheng Xu, Jiachen Li, Ziran Wang, Zhiwen Fan, and Zhengzhong Tu. Stamp: Scalable task and model-agnostic collaborative perception. arXiv preprint arXiv:2501.18616, 2025. 3, 5",
1411
+ "[16] Nathaniel Moore Glaser and Zsolt Kira. We need to talk: Identifying and overcoming communication-critical scenarios for self-driving. arXiv preprint arXiv:2305.04352, 2023. 3",
1412
+ "[17] Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv eprints, pages arXiv-2407, 2024. 2",
1413
+ "[18] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 2",
1414
+ "[19] Jingda Guo, Dominic Carrillo, Sihai Tang, Qi Chen, Qing Yang, Song Fu, Xi Wang, Nannan Wang, and Paparao Palacharla. Coff: Cooperative spatial feature fusion for 3-d object detection on autonomous vehicles. IEEE Internet of Things Journal, 8(14):11078-11087, 2021. 3",
1415
+ "[20] Senkang Hu, Zhengru Fang, Zihan Fang, Yiqin Deng, Xianhao Chen, and Yuguang Fang. Agentscodriver: Large language model empowered collaborative driving with lifelong learning. arXiv preprint arXiv:2404.06345, 2024. 3",
1416
+ "[21] Yue Hu, Yifan Lu, Runsheng Xu, Weidi Xie, Siheng Chen, and Yanfeng Wang. Collaboration helps camera overtake lidar in 3d detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9243-9252, 2023. 3",
1417
+ "[22] Yue Hu, Xianghe Pang, Xiaoqi Qin, Yonina C Eldar, Siheng Chen, Ping Zhang, and Wenjun Zhang. Pragmatic communication in multi-agent collaborative perception. arXiv preprint arXiv:2401.12694, 2024. 1",
1418
+ "[23] Yue Huang, Chujie Gao, Siyuan Wu, Haoran Wang, Xiangqi Wang, Yujun Zhou, Yanbo Wang, Jiayi Ye, Jiawen Shi, Qihui Zhang, et al. On the trustworthiness of generative foundation models: Guideline, assessment, and perspective. arXiv preprint arXiv:2502.14296, 2025. 2",
1419
+ "[24] Jyh-Jing Hwang, Runsheng Xu, Hubert Lin, Wei-Chih Hung, Jingwei Ji, Kristy Choi, Di Huang, Tong He, Paul Covington, Benjamin Sapp, et al. Emma: End-to-end multimodal model for autonomous driving. arXiv preprint arXiv:2410.23262, 2024. 2",
1420
+ "[25] Bo Jiang, Shaoyu Chen, Qian Zhang, Wenyu Liu, and Xinggang Wang. Alphadrive: Unleashing the power of vlms in autonomous driving via reinforcement learning and reasoning, 2025. 1",
1421
+ "[26] Zhengxing Lan, Lingshan Liu, Bo Fan, Yisheng Lv, Yi long Ren, and Zhiyong Cui. Traj-llm: A new exploration for empowering trajectory prediction with pre-trained large language models. IEEE Transactions on Intelligent Vehicles, 2024. 3"
1422
+ ],
1423
+ "bbox": [
1424
+ 114,
1425
+ 92,
1426
+ 482,
1427
+ 910
1428
+ ],
1429
+ "page_idx": 9
1430
+ },
1431
+ {
1432
+ "type": "list",
1433
+ "sub_type": "ref_text",
1434
+ "list_items": [
1435
+ "[27] Yun Li, Kai Katsumata, Ehsan Javanmardi, and Manabu Tsukada. Large language models for human-like autonomous driving: A survey. arXiv preprint arXiv:2407.19280, 2024. 3",
1436
+ "[28] Chengsi Liang, Hongyang Du, Yao Sun, Dusit Niyato, Jiawen Kang, Dezong Zhao, and Muhammad Ali Imran. Generative ai-driven semantic communication networks: Architecture, technologies and applications. IEEE Transactions on Cognitive Communications and Networking, 2024. 3",
1437
+ "[29] Genjia Liu, Yue Hu, Chenxin Xu, Weibo Mao, Junhao Ge, Zhengxiang Huang, Yifan Lu, Yinda Xu, Junkai Xia, Yafei Wang, et al. Towards collaborative autonomous driving: Simulation platform and end-to-end system. arXiv preprint arXiv:2404.09496, 2024.3",
1438
+ "[30] Si Liu, Chen Gao, Yuan Chen, Xingyu Peng, Xianghao Kong, Kun Wang, Runsheng Xu, Wentao Jiang, Hao Xiang, Jiaqi Ma, et al. Towards vehicle-to-everything autonomous driving: A survey on collaborative perception. arXiv preprint arXiv:2308.16714, 2023. 1",
1439
+ "[31] Yifan Lu, Yue Hu, Yiqi Zhong, Dequan Wang, Siheng Chen, and Yanfeng Wang. An extensible framework for open heterogeneous collaborative perception. arXiv preprint arXiv:2401.13964, 2024. 5",
1440
+ "[32] Xuewen Luo, Chenxi Liu, Fan Ding, Fengze Yang, Yang Zhou, Junnyong Loo, and Hwa Hui Tew. Senserag: Constructing environmental knowledge bases with proactive querying for llm-based autonomous driving. In Proceedings of the Winter Conference on Applications of Computer Vision, pages 989-996, 2025. 2",
1441
+ "[33] Jianbiao Mei, Yukai Ma, Xuemeng Yang, Licheng Wen, Xinyu Cai, Xin Li, Daocheng Fu, Bo Zhang, Pinlong Cai, Min Dou, et al. Continuously learning, adapting, and improving: A dual-process approach to autonomous driving. arXiv preprint arXiv:2405.15324, 2024. 2, 8",
1442
+ "[34] Gledson Melotti, Cristiano Premebida, and Nuno Gonçalves. Multimodal deep-learning for object recognition combining camera and lidar data. In 2020 IEEE International Conference on Autonomous Robot Systems and Competitions (ICARSC), pages 177-182. IEEE, 2020. 3",
1443
+ "[35] OpenAI. Introducing chatgpt. https://openai.com/index/chatgpt/, 2022. [Accessed 13-03-2025]. 2",
1444
+ "[36] OpenAI. Gpt-4o-mini, 2024. Large language model, available at https://openai.com.5",
1445
+ "[37] SAE International. V2X Communications Message Set Dictionary. Technical Report SAE J2735_202409, SAE International, 2024. Revised September 2024. 9",
1446
+ "[38] Hao Shao, Yuxuan Hu, Letian Wang, Guanglu Song, Steven L Waslander, Yu Liu, and Hongsheng Li. Lm-drive: Closed-loop end-to-end driving with large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15120-15130, 2024. 2",
1447
+ "[39] Shuyao Shi, Jiahe Cui, Zhehao Jiang, Zhenyu Yan, Guoliang Xing, Jianwei Niu, and Zhenchao Ouyang. Vips: Real-time perception fusion for infrastructure-assisted"
1448
+ ],
1449
+ "bbox": [
1450
+ 516,
1451
+ 92,
1452
+ 883,
1453
+ 910
1454
+ ],
1455
+ "page_idx": 9
1456
+ },
1457
+ {
1458
+ "type": "page_number",
1459
+ "text": "10",
1460
+ "bbox": [
1461
+ 490,
1462
+ 935,
1463
+ 508,
1464
+ 946
1465
+ ],
1466
+ "page_idx": 9
1467
+ },
1468
+ {
1469
+ "type": "list",
1470
+ "sub_type": "ref_text",
1471
+ "list_items": [
1472
+ "autonomous driving. In Proceedings of the 28th annual international conference on mobile computing and networking, pages 133-146, 2022. 3",
1473
+ "[40] Chonghao Sima, Katrin Renz, Kashyap Chitta, Li Chen, Hanxue Zhang, Chengen Xie, Jens BeiBwenger, Ping Luo, Andreas Geiger, and Hongyang Li. Drivelm: Driving with graph visual question answering. In European Conference on Computer Vision, pages 256-274. Springer, 2024. 1, 2",
1474
+ "[41] Rui Song, Chenwei Liang, Hu Cao, Zhiran Yan, Walter Zimmer, Markus Gross, Andreas Festag, and Alois Knoll. Collaborative semantic occupancy prediction with hybrid feature fusion in connected automated vehicles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17996-18006, 2024. 3",
1475
+ "[42] The San Francisco Standard. Stalled waymo creates traffic chaos in the mission. https://sfstandard.com/2023/03/03/stalled-waymo-creates-traffic-chaos-in-mission/, 2023. [Accessed 13-03-2025]. 2",
1476
+ "[43] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 2",
1477
+ "[44] Xiaoyu Tian, Junru Gu, Bailin Li, Yicheng Liu, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, Xianpeng Lang, and Hang Zhao. Drivevm: The convergence of autonomous driving and large vision-language models. arXiv preprint arXiv:2402.12289, 2024. 1, 8",
1478
+ "[45] Binglu Wang, Lei Zhang, Zhaozhong Wang, Yongqiang Zhao, and Tianfei Zhou. Core: Cooperative reconstruction for multi-agent perception. In 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 8676-8686. IEEE Computer Society, 2023. 3",
1479
+ "[46] Rujia Wang, Xiangbo Gao, Hao Xiang, Runsheng Xu, and Zhengzhong Tu. Cocmt: Communication-efficient cross-modal transformer for collaborative perception. arXiv preprint arXiv:2503.13504, 2025.",
1480
+ "[47] Tsun-Hsuan Wang, Sivabalan Manivasagam, Ming Liang, Bin Yang, Wenyuan Zeng, and Raquel Urtasun. V2vnet: Vehicle-to-vehicle communication for joint perception and prediction. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16, pages 605–621. Springer, 2020. 3",
1481
+ "[48] Tsun-Hsuan Wang, Alaa Maalouf, Wei Xiao, Yutong Ban, Alexander Amini, Guy Rosman, Sertac Karaman, and Daniela Rus. Drive anywhere: Generalizable end-to-end autonomous driving with multi-modal foundation models. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 6687-6694. IEEE, 2024. 2",
1482
+ "[49] Wenhai Wang, Jiangwei Xie, ChuanYang Hu, Haoming Zou, Jianan Fan, Wenwen Tong, Yang Wen, Silei Wu, Hanming Deng, Zhiqi Li, et al. Drivemlm: Aligning multi-modal large language models with behavioral"
1483
+ ],
1484
+ "bbox": [
1485
+ 114,
1486
+ 92,
1487
+ 483,
1488
+ 911
1489
+ ],
1490
+ "page_idx": 10
1491
+ },
1492
+ {
1493
+ "type": "list",
1494
+ "sub_type": "ref_text",
1495
+ "list_items": [
1496
+ "planning states for autonomous driving. arXiv preprint arXiv:2312.09245, 2023. 2",
1497
+ "[50] Keshu Wu, Pei Li, Yang Zhou, Rui Gan, Junwei You, Yang Cheng, Jingwen Zhu, Steven T Parker, Bin Ran, David A Noyce, et al. V2x-llm: Enhancing v2x integration and understanding in connected vehicle corridors. arXiv preprint arXiv:2503.02239, 2025. 2, 3",
1498
+ "[51] Shuo Xing, Hongyuan Hua, Xiangbo Gao, Shenzhe Zhu, Renjie Li, Kexin Tian, Xiaopeng Li, Heng Huang, Tianbao Yang, Zhangyang Wang, et al. Autotrust: Benchmarking trustworthiness in large vision language models for autonomous driving. arXiv preprint arXiv:2412.15206, 2024. 2",
1499
+ "[52] Shuo Xing, Chengyuan Qian, Yuping Wang, Hongyuan Hua, Kexin Tian, Yang Zhou, and Zhengzhong Tu. Openemma: Open-source multimodal model for end-to-end autonomous driving. In Proceedings of the Winter Conference on Applications of Computer Vision, pages 1001-1009, 2025. 1, 2, 4",
1500
+ "[53] Runsheng Xu, Hao Xiang, Zhengzhong Tu, Xin Xia, Ming-Hsuan Yang, and Jiaqi Ma. V2x-vit: Vehicle-to-everything cooperative perception with vision transformer. In European conference on computer vision, pages 107-124. Springer, 2022. 3",
1501
+ "[54] Runsheng Xu, Jinlong Li, Xiaoyu Dong, Hongkai Yu, and Jiaqi Ma. Bridging the domain gap for multi-agent perception. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 6035-6042. IEEE, 2023. 3",
1502
+ "[55] Runsheng Xu, Zhengzhong Tu, Hao Xiang, Wei Shao, Bolei Zhou, and Jiaqi Ma. Cobevt: Cooperative bird's eye view semantic segmentation with sparse transformers. In Conference on Robot Learning, pages 989-1000. PMLR, 2023. 3",
1503
+ "[56] Zhenhua Xu, Yujia Zhang, Enze Xie, Zhen Zhao, Yong Guo, Kwan-Yee K Wong, Zhenguo Li, and Hengshuang Zhao. Drivegpt4: Interpretable end-to-end autonomous driving via large language model. IEEE Robotics and Automation Letters, 2024. 1, 2",
1504
+ "[57] Huaiyuan Yao, Longchao Da, Vishnu Nandam, Justin Turnau, Zhiwei Liu, Linsey Pang, and Hua Wei. Comal: Collaborative multi-agent large language models for mixed-autonomy traffic. arXiv preprint arXiv:2410.14368, 2024. 3",
1505
+ "[58] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023. 1",
1506
+ "[59] Junwei You, Haotian Shi, Zhuoyu Jiang, Zilin Huang, Rui Gan, Keshu Wu, Xi Cheng, Xiaopeng Li, and Bin Ran. V2x-vlm: End-to-end v2x cooperative autonomous driving through large vision-language models. arXiv preprint arXiv:2408.09251, 2024.3",
1507
+ "[60] Haibao Yu, Yingjuan Tang, Enze Xie, Jilei Mao, Jirui Yuan, Ping Luo, and Zaiqing Nie. Vehicle-infrastructure cooperative 3d object detection via feature flow prediction. arXiv preprint arXiv:2303.10552, 2023. 3"
1508
+ ],
1509
+ "bbox": [
1510
+ 516,
1511
+ 92,
1512
+ 883,
1513
+ 911
1514
+ ],
1515
+ "page_idx": 10
1516
+ },
1517
+ {
1518
+ "type": "page_number",
1519
+ "text": "11",
1520
+ "bbox": [
1521
+ 490,
1522
+ 935,
1523
+ 506,
1524
+ 946
1525
+ ],
1526
+ "page_idx": 10
1527
+ },
1528
+ {
1529
+ "type": "list",
1530
+ "sub_type": "ref_text",
1531
+ "list_items": [
1532
+ "[61] Wenyuan Zeng, Shenlong Wang, Renjie Liao, Yun Chen, Bin Yang, and Raquel Urtasun. Dsdnet: Deep structured self-driving network. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXI 16, pages 156-172. Springer, 2020. 3",
1533
+ "[62] Ruichen Zhang, Ke Xiong, Hongyang Du, Dusit Niyato, Jiawen Kang, Xuemin Shen, and H Vincent Poor. Generative ai-enabled vehicular networks: Fundamentals, framework, and case study. IEEE Network, 2024. 3",
1534
+ "[63] Walter Zimmer, Ross Greer, Xingcheng Zhou, Rui Song, Marc Pavel, Daniel Lehmberg, Ahmed Ghita, Akshay Gopalkrishnan, Mohan Trivedi, and Alois Knoll. Enhancing highway safety: Accident detection on the a9 test stretch using roadside sensors. arXiv preprint arXiv:2502.00402, 2025. 9"
1535
+ ],
1536
+ "bbox": [
1537
+ 114,
1538
+ 90,
1539
+ 486,
1540
+ 330
1541
+ ],
1542
+ "page_idx": 11
1543
+ },
1544
+ {
1545
+ "type": "page_number",
1546
+ "text": "12",
1547
+ "bbox": [
1548
+ 490,
1549
+ 935,
1550
+ 509,
1551
+ 946
1552
+ ],
1553
+ "page_idx": 11
1554
+ },
1555
+ {
1556
+ "type": "text",
1557
+ "text": "LangCoop: Collaborative Driving with Language",
1558
+ "text_level": 1,
1559
+ "bbox": [
1560
+ 250,
1561
+ 85,
1562
+ 750,
1563
+ 109
1564
+ ],
1565
+ "page_idx": 12
1566
+ },
1567
+ {
1568
+ "type": "text",
1569
+ "text": "Supplementary Material",
1570
+ "text_level": 1,
1571
+ "bbox": [
1572
+ 382,
1573
+ 118,
1574
+ 614,
1575
+ 140
1576
+ ],
1577
+ "page_idx": 12
1578
+ },
1579
+ {
1580
+ "type": "text",
1581
+ "text": "Example of Natural Language Information Packaging (LangPack) - Sent by Agent 1, Received by Agent 0",
1582
+ "bbox": [
1583
+ 138,
1584
+ 157,
1585
+ 455,
1586
+ 186
1587
+ ],
1588
+ "page_idx": 12
1589
+ },
1590
+ {
1591
+ "type": "text",
1592
+ "text": "1.5 KB",
1593
+ "text_level": 1,
1594
+ "bbox": [
1595
+ 140,
1596
+ 200,
1597
+ 186,
1598
+ 213
1599
+ ],
1600
+ "page_idx": 12
1601
+ },
1602
+ {
1603
+ "type": "text",
1604
+ "text": "Agent 1, located at: [2.69048, 69.03092], current speed: $4.518\\mathrm{m / s}$",
1605
+ "bbox": [
1606
+ 138,
1607
+ 223,
1608
+ 455,
1609
+ 250
1610
+ ],
1611
+ "page_idx": 12
1612
+ },
1613
+ {
+ "type": "text",
+ "text": "Its scene description:",
+ "text_level": 1,
+ "bbox": [
+ 138,
+ 252,
+ 276,
+ 265
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "The driving scenario shows a clear, daytime environment with sunny weather. The road appears to be well-maintained, featuring multiple lanes in both directions. There is moderate traffic, including vehicles such as cars and a motorcycle. Trees line the roadside, indicating a suburban or semi-rural area. Overall, conditions are favorable for driving.",
+ "bbox": [
+ 138,
+ 265,
+ 455,
+ 361
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its object description:",
+ "text_level": 1,
+ "bbox": [
+ 138,
+ 362,
+ 281,
+ 375
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "1. Motorcycle",
+ "- Location: Center lane, mid-distance",
+ "- Status: Riding in the same direction as the vehicle",
+ "- Intent: Likely continuing straight ahead on the road.",
+ "2. Black SUV",
+ "- Location: Right lane, close to the front of the vehicle",
+ "- Status: Stationary",
+ "- Intent: Appears to be waiting or preparing to merge into the lane.",
+ "3. Red Car",
+ "- Location: Left lane, further ahead",
+ "- Status: Moving slowly, closer to the center divider",
+ "- Intent: Preparing to turn or change lanes."
+ ],
+ "bbox": [
+ 140,
+ 376,
+ 455,
+ 555
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its target description:",
+ "text_level": 1,
+ "bbox": [
+ 140,
+ 556,
+ 279,
+ 569
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "The target is 0.15402 meters to my right and 32.39753 meters to my front.",
+ "bbox": [
+ 138,
+ 570,
+ 455,
+ 597
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its intent description:",
+ "text_level": 1,
+ "bbox": [
+ 140,
+ 598,
+ 279,
+ 609
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "1. Check Environment: Identify the surrounding vehicles and road conditions.",
+ "2. Speed Control: Maintain a steady speed and adhere to the speed limit.",
+ "3. Adjust Direction: Slightly adjust to the right to align with the target location, ensuring no obstacles are in the way.",
+ "4. Avoid Collisions: The car ahead is too close; slow down while adjusting to ensure a safe distance."
+ ],
+ "bbox": [
+ 140,
+ 611,
+ 455,
+ 734
+ ],
+ "page_idx": 12
+ },
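Agent 1's message above follows a fixed five-part layout: a numeric header, then scene, object, target, and intent sections. A minimal sketch of a container that renders into that layout (the dataclass and every field name are assumptions of mine; the source shows only the rendered text):

```python
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class LangPack:
    # All field names here are assumptions; the paper shows the
    # rendered message text, not its internal representation.
    agent_id: int
    location: Tuple[float, float]   # (x, y) in the shared map frame
    speed_mps: float
    scene: str                      # free-form scene description
    objects: List[str] = field(default_factory=list)
    target: str = ""
    intents: List[str] = field(default_factory=list)

    def render(self) -> str:
        """Serialize into the natural-language layout of the examples."""
        lines = [
            f"Agent {self.agent_id}, located at: "
            f"[{self.location[0]:.5f}, {self.location[1]:.5f}], "
            f"current speed: {self.speed_mps:.3f} m/s",
            "Its scene description:",
            self.scene,
            "Its object description:",
            *self.objects,
            "Its target description:",
            self.target,
            "Its intent description:",
            *self.intents,
        ]
        return "\n".join(lines)
```

Only the rendered string would go over the air, which is why the examples are labeled with byte sizes rather than tensor shapes.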
1725
+ {
+ "type": "text",
+ "text": "Example of Natural Language Information Packaging (LangPack) - Sent by Agent 0, Received by Agent 1",
+ "bbox": [
+ 540,
+ 157,
+ 857,
+ 186
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "1.4 KB",
+ "text_level": 1,
+ "bbox": [
+ 540,
+ 200,
+ 586,
+ 213
+ ],
+ "page_idx": 12
+ },
1748
+ {
+ "type": "text",
+ "text": "Agent 0, located at: [2.25215, 80.50174], current speed: $5.630\\mathrm{m / s}$",
+ "bbox": [
+ 539,
+ 223,
+ 856,
+ 250
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its scene description:",
+ "text_level": 1,
+ "bbox": [
+ 540,
+ 252,
+ 676,
+ 263
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "The scenario depicts a clear, sunny day with good visibility. The road is a multi-lane paved highway with light to moderate traffic. Several vehicles are moving in both directions, and there are trees lining the roadside, indicating a suburban or rural environment. The road condition appears smooth and well-maintained.",
+ "bbox": [
+ 539,
+ 265,
+ 856,
+ 347
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its object description:",
+ "text_level": 1,
+ "bbox": [
+ 540,
+ 349,
+ 681,
+ 362
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "1. Car (front center): Positioned in the right lane, traveling at a moderate speed. Intent appears to be straightforward driving.",
+ "2. Motorcycle (left center): Located in the left lane, approaching the vehicle. Likely intent is to overtake the car in the center lane.",
+ "3. Car (right lane, further ahead): Positioned in the right lane, moving in the same direction as others. Status indicates normal driving behavior."
+ ],
+ "bbox": [
+ 539,
+ 362,
+ 856,
+ 486
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its target description:",
+ "text_level": 1,
+ "bbox": [
+ 540,
+ 487,
+ 681,
+ 500
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "The target is 0.86387 meters to my right and 36.0 meters to my front. The target is not an endpoint; continue moving forward after reaching it.",
+ "bbox": [
+ 539,
+ 500,
+ 856,
+ 541
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Its intent description:",
+ "text_level": 1,
+ "bbox": [
+ 540,
+ 542,
+ 679,
+ 555
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "1. Decelerate: Reduce speed to stay within the speed limit of $20\\mathrm{m / s}$.",
+ "2. Slightly Adjust Direction: Steer right towards the target (0.15402 meters to your right).",
+ "3. Monitor Traffic: Vehicles are ahead. To ensure a safe distance, slow down or change lanes if necessary.",
+ "4. Continue Forward: Maintain forward motion, adjusting as needed for further navigation."
+ ],
+ "bbox": [
+ 540,
+ 556,
+ 856,
+ 666
+ ],
+ "page_idx": 12
+ },
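Both examples carry a payload-size label (1.5 KB and 1.4 KB). Reproducing that figure for a rendered message is a one-liner (the function name is mine):

```python
def message_size_kb(message: str) -> float:
    # Size of the rendered message on the wire, assuming UTF-8 encoding.
    return len(message.encode("utf-8")) / 1024
```

For the LangPack sketch above, message_size_kb(pack.render()) should land in the same low-kilobyte range as the two examples on this page.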
1862
+ {
+ "type": "page_number",
+ "text": "1",
+ "bbox": [
+ 493,
+ 935,
+ 503,
+ 946
+ ],
+ "page_idx": 12
+ }
+ ]
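The two messages sit side by side on the supplementary page (page_idx 12), so recovering them from the content list needs a column split before the usual top-to-bottom sort. A sketch, with the x = 500 split point read off the bboxes above and an illustrative path:

```python
import json

with open("content_list.json", encoding="utf-8") as f:  # illustrative path
    blocks = json.load(f)

page = [b for b in blocks
        if b.get("page_idx") == 12 and b["type"] != "page_number"]
# Agent 1's column starts near x=138, Agent 0's near x=539; split at 500.
# (The page titles, at x=250 and x=382, land in the left column.)
for column in ([b for b in page if b["bbox"][0] < 500],
               [b for b in page if b["bbox"][0] >= 500]):
    for b in sorted(column, key=lambda b: b["bbox"][1]):
        print(b.get("text") or "\n".join(b["list_items"]))
    print("-" * 60)
```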